Handle URLs without scheme, fixes SBS issue #27
diff --git a/common.py b/common.py
index c2badff307d177f75cc8fa74398eb158a51b03ff..fb1cf161f9d1007fe960c4ddf60adc7aa2740877 100644
--- a/common.py
+++ b/common.py
@@ -1,21 +1,19 @@
-from lxml import etree, html
-import cookielib
+import python2_compat
+
+import hashlib
+import http.cookiejar
 import json
-try:
-    import hashlib
-except ImportError:
-    import md5 as hashlib
+import logging
+import lxml.etree
+import lxml.html
 import os
 import re
 import shutil
 import signal
 import subprocess
-import sys
-import tempfile
 import time
-import urllib
-import urllib2
-import urlparse
+import urllib.parse
+import urllib.request
 
 
 try:
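
The first hunk moves each Python 2 module to its Python 3 home: cookielib becomes
http.cookiejar, urllib/urllib2/urlparse become urllib.parse and urllib.request, and
the md5 fallback goes away because hashlib is always available now. The
python2_compat module itself is not part of this diff; purely as an illustration of
the usual dual-interpreter pattern (not the actual contents of that module), such a
shim tends to look like this:

    # Hypothetical sketch only -- webdl's real python2_compat is not shown here.
    try:
        # Python 3 locations
        import http.cookiejar as cookiejar
        import urllib.parse as urlparse
        import urllib.request as urlrequest
    except ImportError:
        # Python 2 fallbacks
        import cookielib as cookiejar
        import urlparse
        import urllib2 as urlrequest
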
@@ -24,9 +22,20 @@ try:
 except ImportError:
     pass
 
-CACHE_DIR = os.path.join(os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")), "webdl")
+
+logging.basicConfig(
+    format = "%(levelname)s %(message)s",
+    level = logging.INFO if os.environ.get("DEBUG", None) is None else logging.DEBUG,
+)
+
+CACHE_DIR = os.path.join(
+    os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")),
+    "webdl"
+)
+
 USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:21.0) Gecko/20100101 Firefox/21.0"
 
+
 class Node(object):
     def __init__(self, title, parent=None):
         self.title = title
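
The logging.basicConfig() call replaces the scattered print statements later in the
file: INFO is the default level, and exporting a DEBUG environment variable (any
value) enables the logging.debug() lines such as the cache hits in urlopen(). A
minimal sketch of the same toggle, assuming only that DEBUG's presence selects
verbosity:

    import logging
    import os

    level = logging.INFO if os.environ.get("DEBUG", None) is None else logging.DEBUG
    logging.basicConfig(format="%(levelname)s %(message)s", level=level)

    logging.debug("shown only when DEBUG is set in the environment")
    logging.info("always shown")

So a hypothetical run like "DEBUG=1 python grabber.py" would print the
urlopen()/cache messages as well.
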
@@ -57,9 +66,6 @@ def load_root_node():
     import sbs
     sbs.fill_nodes(root_node)
 
-###    import plus7
-###    plus7.fill_nodes(root_node)
-
     import brightcove
     brightcove.fill_nodes(root_node)
 
@@ -67,39 +73,51 @@ def load_root_node():
 
 valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
 def sanify_filename(filename):
-    filename = filename.encode("ascii", "ignore")
     filename = "".join(c for c in filename if c in valid_chars)
+    assert len(filename) > 0
     return filename
 
-cookiejar = cookielib.CookieJar()
-urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
+def ensure_scheme(url):
+    parts = urllib.parse.urlparse(url)
+    if parts.scheme:
+        return url
+    parts = list(parts)
+    parts[0] = "http"
+    return urllib.parse.urlunparse(parts)
+
+cookiejar = http.cookiejar.CookieJar()
+urlopener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar))
 def _urlopen(url, referrer=None):
-    req = urllib2.Request(url)
+    url = ensure_scheme(url)
+    req = urllib.request.Request(url)
     req.add_header("User-Agent", USER_AGENT)
     if referrer:
         req.add_header("Referer", referrer)
     return urlopener.open(req)
 
 def urlopen(url, max_age):
-### print url
+    logging.debug("urlopen(%r, %r)", url, max_age)
+
     if not os.path.isdir(CACHE_DIR):
         os.makedirs(CACHE_DIR)
 
     if max_age <= 0:
         return _urlopen(url)
 
-    filename = hashlib.md5(url).hexdigest()
+    filename = hashlib.md5(url.encode("utf-8")).hexdigest()
     filename = os.path.join(CACHE_DIR, filename)
     if os.path.exists(filename):
         file_age = int(time.time()) - os.path.getmtime(filename)
         if file_age < max_age:
-            return open(filename)
+            logging.debug("loading from cache: %s", filename)
+            return open(filename, "rb")
 
+    logging.debug("downloading: %s -> %s", url, filename)
     src = _urlopen(url)
     dst = open(filename, "wb")
     try:
         shutil.copyfileobj(src, dst)
-    except Exception, e:
+    except Exception as e:
         try:
             os.unlink(filename)
         except OSError:
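
ensure_scheme() is the change the commit message names: SBS (issue #27) apparently
returns URLs with no scheme, which urllib.request cannot open and which the old
video_url.replace("http://", ...) calls in the downloaders would leave untouched, so
every URL is now normalised to http:// before use. The rest of the hunk is Python 3
housekeeping in urlopen(): md5 wants bytes, so the URL is encoded before hashing,
and cache files are opened in binary mode so the lxml parsers and the later
decode("utf-8") calls see raw bytes. A usage sketch of the helper with a
hypothetical scheme-relative URL:

    import urllib.parse

    def ensure_scheme(url):
        # Same logic as the new helper: absolute URLs pass through,
        # anything without a scheme defaults to http.
        parts = urllib.parse.urlparse(url)
        if parts.scheme:
            return url
        parts = list(parts)
        parts[0] = "http"
        return urllib.parse.urlunparse(parts)

    print(ensure_scheme("//example.com/video/manifest.f4m"))
    # -> http://example.com/video/manifest.f4m
    print(ensure_scheme("https://example.com/feed"))
    # -> https://example.com/feed (unchanged)
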
@@ -108,7 +126,7 @@ def urlopen(url, max_age):
     src.close()
     dst.close()
 
-    return open(filename)
+    return open(filename, "rb")
 
 def grab_text(url, max_age):
     f = urlopen(url, max_age)
@@ -118,45 +136,47 @@ def grab_text(url, max_age):
 
 def grab_html(url, max_age):
     f = urlopen(url, max_age)
-    doc = html.parse(f, html.HTMLParser(encoding="utf-8", recover=True))
+    doc = lxml.html.parse(f, lxml.html.HTMLParser(encoding="utf-8", recover=True))
     f.close()
     return doc
 
 def grab_xml(url, max_age):
     f = urlopen(url, max_age)
-    doc = etree.parse(f, etree.XMLParser(encoding="utf-8", recover=True))
+    doc = lxml.etree.parse(f, lxml.etree.XMLParser(encoding="utf-8", recover=True))
     f.close()
     return doc
 
 def grab_json(url, max_age, skip_assignment=False, skip_function=False):
     f = urlopen(url, max_age)
+    text = f.read().decode("utf-8")
+
     if skip_assignment:
-        text = f.read()
         pos = text.find("=")
-        doc = json.loads(text[pos+1:])
+        text = text[pos+1:]
+
     elif skip_function:
-        text = f.read()
         pos = text.find("(")
         rpos = text.rfind(")")
-        doc = json.loads(text[pos+1:rpos])
-    else:
-        doc = json.load(f)
+        text = text[pos+1:rpos]
+
+    doc = json.loads(text)
     f.close()
     return doc
 
 def exec_subprocess(cmd):
+    logging.debug("Executing: %s", cmd)
     try:
         p = subprocess.Popen(cmd)
         ret = p.wait()
         if ret != 0:
-            print >>sys.stderr, cmd[0], "exited with error code:", ret
+            logging.error("%s exited with error code: %s", cmd[0], ret)
             return False
         else:
             return True
-    except OSError, e:
-        print >>sys.stderr, "Failed to run", cmd[0], e
+    except OSError as e:
+        logging.error("Failed to run: %s -- %s", cmd[0], e)
     except KeyboardInterrupt:
-        print "Cancelled", cmd
+        logging.info("Cancelled: %s", cmd)
         try:
             p.terminate()
             p.wait()
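
grab_json() now decodes the whole response once and applies both stripping modes to
that text: skip_assignment discards everything up to the first "=" (for
"var foo = {...}" style responses) and skip_function unwraps a JSONP-style
"callback({...})" before json.loads(). A sketch of that stripping with made-up
payloads:

    import json

    def extract_json(text, skip_assignment=False, skip_function=False):
        # Same slicing as grab_json() above, applied to an in-memory string.
        if skip_assignment:
            text = text[text.find("=") + 1:]
        elif skip_function:
            text = text[text.find("(") + 1:text.rfind(")")]
        return json.loads(text)

    print(extract_json('var config = {"id": 1}', skip_assignment=True))  # {'id': 1}
    print(extract_json('onLoad({"id": 2});', skip_function=True))        # {'id': 2}
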
@@ -197,7 +217,7 @@ def generate_remux_cmd(infile, outfile):
     raise Exception("You must install ffmpeg or libav-tools")
 
 def remux(infile, outfile):
-    print "Converting %s to mp4" % infile
+    logging.info("Converting %s to mp4", infile)
     cmd = generate_remux_cmd(infile, outfile)
     if not exec_subprocess(cmd):
         # failed, error has already been logged
@@ -209,18 +229,18 @@ def remux(infile, outfile):
             os.unlink(infile)
             return True
         else:
-            print >>sys.stderr, "The size of", outfile, "is suspicious, did avconv fail?"
+            logging.error("The size of %s is suspicious, did the remux fail?", outfile)
             return False
-    except Exception, e:
-        print >>sys.stderr, "Conversion failed", e
+    except Exception as e:
+        logging.error("Conversion failed! %s", e)
         return False
 
 def convert_to_mp4(filename):
-    with open(filename) as f:
+    with open(filename, "rb") as f:
         fourcc = f.read(4)
     basename, ext = os.path.splitext(filename)
 
-    if ext == ".mp4" and fourcc == "FLV\x01":
+    if ext == ".mp4" and fourcc == b"FLV\x01":
         os.rename(filename, basename + ".flv")
         ext = ".flv"
         filename = basename + ext
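
The fourcc comparison only works with a bytes literal: the file is opened in binary
mode, so f.read(4) returns bytes under Python 3, and comparing that against the old
str "FLV\x01" would always be False. The check in isolation, with an arbitrary path:

    def looks_like_flv(path):
        # FLV files start with the magic number "FLV" followed by 0x01.
        with open(path, "rb") as f:
            return f.read(4) == b"FLV\x01"
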
@@ -234,12 +254,18 @@ def convert_to_mp4(filename):
 
 def download_hds(filename, video_url, pvswf=None):
     filename = sanify_filename(filename)
-    video_url = video_url.replace("http://", "hds://")
-    print "Downloading: %s" % filename
+    logging.info("Downloading: %s", filename)
+
+    video_url = "hds://" + video_url
+    if pvswf:
+        param = "%s pvswf=%s" % (video_url, pvswf)
+    else:
+        param = video_url
+
     cmd = [
         "livestreamer",
         "-o", filename,
-        "%s pvswf=%s" % (video_url, pvswf),
+        param,
         "best",
     ]
     if exec_subprocess(cmd):
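
livestreamer receives the stream URL and the optional pvswf verification SWF as one
combined argument. Building it conditionally avoids the literal "pvswf=None" the old
format string produced when no SWF was supplied, and prefixing "hds://" instead of
replacing "http://" also copes with the scheme-less URLs this commit is about.
Roughly, with hypothetical values:

    video_url = "hds://example.com/video/manifest.f4m"
    pvswf = "http://example.com/player.swf"            # may be None

    cmd = [
        "livestreamer",
        "-o", "Example Episode.flv",
        "%s pvswf=%s" % (video_url, pvswf) if pvswf else video_url,
        "best",
    ]
    # -> ["livestreamer", "-o", "Example Episode.flv",
    #     "hds://example.com/video/manifest.f4m pvswf=http://example.com/player.swf",
    #     "best"]
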
@@ -249,8 +275,9 @@ def download_hds(filename, video_url, pvswf=None):
 
 def download_hls(filename, video_url):
     filename = sanify_filename(filename)
-    video_url = video_url.replace("http://", "hlsvariant://")
-    print "Downloading: %s" % filename
+    video_url = "hlsvariant://" + video_url
+    logging.info("Downloading: %s", filename)
+
     cmd = [
         "livestreamer",
         "-o", filename,
@@ -262,19 +289,16 @@ def download_hls(filename, video_url):
     else:
         return False
 
-def download_rtmp(filename, vbase, vpath, hash_url=None):
+def download_http(filename, video_url):
     filename = sanify_filename(filename)
-    print "Downloading: %s" % filename
-    if vpath.endswith(".flv"):
-        vpath = vpath[:-4]
+    logging.info("Downloading: %s", filename)
+
     cmd = [
-        "rtmpdump",
+        "curl",
+        "--fail", "--retry", "3",
         "-o", filename,
-        "-r", vbase,
-        "-y", vpath,
+        video_url,
     ]
-    if hash_url is not None:
-        cmd += ["--swfVfy", hash_url]
     if exec_subprocess(cmd):
         return convert_to_mp4(filename)
     else:
@@ -290,7 +314,7 @@ def natural_sort(l, key=None):
         for c in re.split("([0-9]+)", k):
             c = c.strip()
             if c.isdigit():
-                newk.append(int(c))
+                newk.append(c.zfill(5))
             else:
                 for subc in c.split():
                     if subc not in ignore_list:
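
The natural_sort key used to mix ints and strings, which Python 3 refuses to
compare; zero-padding each digit run keeps every key element a string while
preserving numeric order (for runs of up to five digits). For illustration, with
hypothetical episode titles:

    import re

    titles = ["Episode 10", "Episode 2", "Episode 1"]

    plain = sorted(titles)
    natural = sorted(titles, key=lambda t: [c.zfill(5) if c.isdigit() else c
                                            for c in re.split("([0-9]+)", t)])

    print(plain)    # ['Episode 1', 'Episode 10', 'Episode 2']
    print(natural)  # ['Episode 1', 'Episode 2', 'Episode 10']
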
@@ -300,14 +324,14 @@ def natural_sort(l, key=None):
     return sorted(l, key=key_func)
 
 def append_to_qs(url, params):
-    r = list(urlparse.urlsplit(url))
-    qs = urlparse.parse_qs(r[3])
-    for k, v in params.iteritems():
+    r = list(urllib.parse.urlsplit(url))
+    qs = urllib.parse.parse_qs(r[3])
+    for k, v in params.items():
         if v is not None:
             qs[k] = v
-        elif qs.has_key(k):
+        elif k in qs:
             del qs[k]
-    r[3] = urllib.urlencode(qs, True)
-    url = urlparse.urlunsplit(r)
+    r[3] = urllib.parse.urlencode(sorted(qs.items()), True)
+    url = urllib.parse.urlunsplit(r)
     return url
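
append_to_qs() picks up the usual dict API changes (.items() instead of
.iteritems(), "k in qs" instead of has_key) and additionally sorts the query
parameters before re-encoding, presumably so the rebuilt URL is stable across runs
regardless of dict ordering. A usage sketch of the updated helper against a
hypothetical URL:

    import urllib.parse

    def append_to_qs(url, params):
        # Same behaviour as the updated function above.
        r = list(urllib.parse.urlsplit(url))
        qs = urllib.parse.parse_qs(r[3])
        for k, v in params.items():
            if v is not None:
                qs[k] = v
            elif k in qs:
                del qs[k]
        r[3] = urllib.parse.urlencode(sorted(qs.items()), True)
        return urllib.parse.urlunsplit(r)

    print(append_to_qs("http://example.com/feed?page=1", {"format": "json", "page": None}))
    # -> http://example.com/feed?format=json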