iView handle missing episodes in collection
index 214e0159b77e11025da3bb4560b62898cc47a572..f0f827bcd4e13e5e4801302e4199fa0f79c6e91b 100644
--- a/common.py
+++ b/common.py
@@ -1,21 +1,18 @@
-from lxml import etree, html
-import cookielib
+import hashlib
+import io
 import json
-try:
-    import hashlib
-except ImportError:
-    import md5 as hashlib
+import logging
+import lxml.etree
+import lxml.html
 import os
 import re
+import requests
+import requests_cache
 import shutil
 import signal
 import subprocess
-import sys
-import tempfile
 import time
-import urllib
-import urllib2
-import urlparse
+import urllib.parse
 
 
 try:
@@ -24,8 +21,22 @@ try:
 except ImportError:
     pass
 
-CACHE_DIR = os.path.join(os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")), "webdl")
-USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:21.0) Gecko/20100101 Firefox/21.0"
+
+logging.basicConfig(
+    format = "%(levelname)s %(message)s",
+    level = logging.INFO if os.environ.get("DEBUG", None) is None else logging.DEBUG,
+)
+
+CACHE_FILE = os.path.join(
+    os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")),
+    "webdl",
+    "requests_cache"
+)
+if not os.path.isdir(os.path.dirname(CACHE_FILE)):
+    os.makedirs(os.path.dirname(CACHE_FILE))
+
+requests_cache.install_cache(CACHE_FILE, backend='sqlite', expire_after=3600)
+
 
 class Node(object):
     def __init__(self, title, parent=None):
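Note on the caching change above: requests_cache.install_cache() monkey-patches requests globally, so every GET issued through http_session is transparently stored in the SQLite file under XDG_CACHE_HOME and reused for up to an hour. A minimal sketch of that behaviour, assuming a throwaway cache path and a purely illustrative URL:

    import requests
    import requests_cache

    requests_cache.install_cache("/tmp/webdl_requests_cache", backend="sqlite", expire_after=3600)

    first = requests.get("https://example.com/feed.xml")    # hits the network
    second = requests.get("https://example.com/feed.xml")   # served from the SQLite cache
    print(getattr(second, "from_cache", False))             # True while the entry is still fresh
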
@@ -57,106 +68,70 @@ def load_root_node():
     import sbs
     sbs.fill_nodes(root_node)
 
-    import plus7
-    plus7.fill_nodes(root_node)
-
-    import brightcove
-    brightcove.fill_nodes(root_node)
+    import ten
+    ten.fill_nodes(root_node)
 
     return root_node
 
 valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
 def sanify_filename(filename):
-    filename = filename.encode("ascii", "ignore")
     filename = "".join(c for c in filename if c in valid_chars)
+    assert len(filename) > 0
     return filename
 
-cookiejar = cookielib.CookieJar()
-urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
-def _urlopen(url, referrer=None):
-    req = urllib2.Request(url)
-    req.add_header("User-Agent", USER_AGENT)
-    if referrer:
-        req.add_header("Referer", referrer)
-    return urlopener.open(req)
-
-def urlopen(url, max_age):
-### print url
-    if not os.path.isdir(CACHE_DIR):
-        os.makedirs(CACHE_DIR)
-
-    if max_age <= 0:
-        return _urlopen(url)
-
-    filename = hashlib.md5(url).hexdigest()
-    filename = os.path.join(CACHE_DIR, filename)
-    if os.path.exists(filename):
-        file_age = int(time.time()) - os.path.getmtime(filename)
-        if file_age < max_age:
-            return open(filename)
-
-    src = _urlopen(url)
-    dst = open(filename, "wb")
-    try:
-        shutil.copyfileobj(src, dst)
-    except Exception, e:
-        try:
-            os.unlink(filename)
-        except OSError:
-            pass
-        raise e
-    src.close()
-    dst.close()
-
-    return open(filename)
-
-def grab_text(url, max_age):
-    f = urlopen(url, max_age)
-    text = f.read().decode("utf-8")
-    f.close()
-    return text
-
-def grab_html(url, max_age):
-    f = urlopen(url, max_age)
-    doc = html.parse(f, html.HTMLParser(encoding="utf-8", recover=True))
-    f.close()
+def ensure_scheme(url):
+    parts = urllib.parse.urlparse(url)
+    if parts.scheme:
+        return url
+    parts = list(parts)
+    parts[0] = "http"
+    return urllib.parse.urlunparse(parts)
+
+http_session = requests.Session()
+http_session.headers["User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:21.0) Gecko/20100101 Firefox/21.0"
+
+def grab_text(url):
+    logging.debug("grab_text(%r)", url)
+    request = http_session.prepare_request(requests.Request("GET", url))
+    response = http_session.send(request)
+    return response.text
+
+def grab_html(url):
+    logging.debug("grab_html(%r)", url)
+    request = http_session.prepare_request(requests.Request("GET", url))
+    response = http_session.send(request, stream=True)
+    doc = lxml.html.parse(io.BytesIO(response.content), lxml.html.HTMLParser(encoding="utf-8", recover=True))
+    response.close()
     return doc
 
-def grab_xml(url, max_age):
-    f = urlopen(url, max_age)
-    doc = etree.parse(f, etree.XMLParser(encoding="utf-8", recover=True))
-    f.close()
+def grab_xml(url):
+    logging.debug("grab_xml(%r)", url)
+    request = http_session.prepare_request(requests.Request("GET", url))
+    response = http_session.send(request, stream=True)
+    doc = lxml.etree.parse(io.BytesIO(response.content), lxml.etree.XMLParser(encoding="utf-8", recover=True))
+    response.close()
     return doc
 
-def grab_json(url, max_age, skip_assignment=False, skip_function=False):
-    f = urlopen(url, max_age)
-    if skip_assignment:
-        text = f.read()
-        pos = text.find("=")
-        doc = json.loads(text[pos+1:])
-    elif skip_function:
-        text = f.read()
-        pos = text.find("(")
-        rpos = text.rfind(")")
-        doc = json.loads(text[pos+1:rpos])
-    else:
-        doc = json.load(f)
-    f.close()
-    return doc
+def grab_json(url):
+    logging.debug("grab_json(%r)", url)
+    request = http_session.prepare_request(requests.Request("GET", url))
+    response = http_session.send(request)
+    return response.json()
 
 def exec_subprocess(cmd):
+    logging.debug("Executing: %s", cmd)
     try:
         p = subprocess.Popen(cmd)
         ret = p.wait()
         if ret != 0:
-            print >>sys.stderr, cmd[0], "exited with error code:", ret
+            logging.error("%s exited with error code: %s", cmd[0], ret)
             return False
         else:
             return True
-    except OSError, e:
-        print >>sys.stderr, "Failed to run", cmd[0], e
+    except OSError as e:
+        logging.error("Failed to run: %s -- %s", cmd[0], e)
     except KeyboardInterrupt:
-        print "Cancelled", cmd
+        logging.info("Cancelled: %s", cmd)
         try:
             p.terminate()
             p.wait()
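With the hand-rolled cache gone, the grab_* helpers above take only a URL; expiry is handled by the installed requests_cache rather than a max_age argument. A hedged usage sketch (the URL and XPath are illustrative, not real endpoints):

    doc = grab_html("https://www.example.org/programs")
    for href in doc.xpath("//a/@href"):
        print(ensure_scheme(href))

    listing = grab_json("https://www.example.org/api/episodes.json")
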
@@ -168,59 +143,116 @@ def exec_subprocess(cmd):
 
 def check_command_exists(cmd):
     try:
-        subprocess.check_output(cmd)
+        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
         return True
     except Exception:
         return False
 
-def generate_remux_cmd(infile, outfile):
-    if check_command_exists(["avconv", "--help"]):
-        return [
-            "avconv",
-            "-i", infile,
-            "-bsf:a", "aac_adtstoasc",
-            "-acodec", "copy",
-            "-vcodec", "copy",
-            outfile,
-        ]
-
-    if check_command_exists(["ffmpeg", "--help"]):
-        return [
-            "ffmpeg",
-            "-i", infile,
-            "-bsf:a", "aac_adtstoasc",
-            "-acodec", "copy",
-            "-vcodec", "copy",
-            outfile,
-        ]
+def find_ffmpeg():
+    for ffmpeg in ["avconv", "ffmpeg"]:
+        if check_command_exists([ffmpeg, "--help"]):
+            return ffmpeg
 
     raise Exception("You must install ffmpeg or libav-tools")
 
+def find_ffprobe():
+    for ffprobe in ["avprobe", "ffprobe"]:
+        if check_command_exists([ffprobe, "--help"]):
+            return ffprobe
+
+    raise Exception("You must install ffmpeg or libav-tools")
+
+def find_streamlink():
+    for streamlink in ["streamlink", "livestreamer"]:
+        if check_command_exists([streamlink, "--help"]):
+            return streamlink
+
+    raise Exception("You must install streamlink or livestreamer")
+
+def get_duration(filename):
+    ffprobe = find_ffprobe()
+
+    cmd = [
+        ffprobe,
+        filename,
+        "-show_format_entry", "duration",
+        "-v", "quiet",
+    ]
+    output = subprocess.check_output(cmd).decode("utf-8")
+    for line in output.split("\n"):
+        m = re.search(r"([0-9]+)", line)
+        if not m:
+            continue
+        duration = m.group(1)
+        if duration.isdigit():
+            return int(duration)
+
+
+    logging.debug("Falling back to full decode to find duration: %s % filename")
+
+    ffmpeg = find_ffmpeg()
+    cmd = [
+        ffmpeg,
+        "-i", filename,
+        "-vn",
+        "-f", "null", "-",
+    ]
+    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+    duration = None
+    for line in re.split(r"[\r\n]", output):
+        m = re.search(r"time=([0-9:]*)\.", line)
+        if not m:
+            continue
+        hours, mins, secs = m.group(1).split(":")
+        # ffmpeg prints the running time as it decodes; keep the last value seen
+        duration = int(hours) * 3600 + int(mins) * 60 + int(secs)
+
+    if duration:
+        return duration
+    else:
+        raise Exception("Unable to determine video duration of " + filename)
+
+def check_video_durations(flv_filename, mp4_filename):
+    flv_duration = get_duration(flv_filename)
+    mp4_duration = get_duration(mp4_filename)
+
+    if abs(flv_duration - mp4_duration) > 1:
+        logging.error(
+            "The duration of %s is suspicious, did the remux fail? Expected %s == %s",
+            mp4_filename, flv_duration, mp4_duration
+        )
+        return False
+
+    return True
+
 def remux(infile, outfile):
-    print "Converting %s to mp4" % infile
-    cmd = generate_remux_cmd(infile, outfile)
+    logging.info("Converting %s to mp4", infile)
+
+    ffmpeg = find_ffmpeg()
+    cmd = [
+        ffmpeg,
+        "-i", infile,
+        "-bsf:a", "aac_adtstoasc",
+        "-acodec", "copy",
+        "-vcodec", "copy",
+        "-y",
+        outfile,
+    ]
     if not exec_subprocess(cmd):
-        # failed, error has already been logged
         return False
-    try:
-        flv_size = os.stat(infile).st_size
-        mp4_size = os.stat(outfile).st_size
-        if abs(flv_size - mp4_size) < 0.1 * flv_size:
-            os.unlink(infile)
-            return True
-        else:
-            print >>sys.stderr, "The size of", outfile, "is suspicious, did avconv fail?"
-            return False
-    except Exception, e:
-        print >>sys.stderr, "Conversion failed", e
+
+    if not check_video_durations(infile, outfile):
         return False
 
+    os.unlink(infile)
+    return True
+
 def convert_to_mp4(filename):
-    with open(filename) as f:
+    with open(filename, "rb") as f:
         fourcc = f.read(4)
     basename, ext = os.path.splitext(filename)
 
-    if ext == ".mp4" and fourcc == "FLV\x01":
+    if ext == ".mp4" and fourcc == b"FLV\x01":
         os.rename(filename, basename + ".flv")
         ext = ".flv"
         filename = basename + ext
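The old file-size heuristic in remux() is replaced by comparing container durations before and after remuxing. A short sketch of how the new helpers above combine, with illustrative filenames:

    seconds = get_duration("episode.flv")    # tries ffprobe/avprobe first, falls back to a full decode
    ok = check_video_durations("episode.flv", "episode.mp4")    # False if they differ by more than one second
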
@@ -232,137 +264,82 @@ def convert_to_mp4(filename):
     return ext == ".mp4"
 
 
-def download_rtmp(filename, vbase, vpath, hash_url=None):
+def download_hds(filename, video_url, pvswf=None):
+    streamlink = find_streamlink()
+
     filename = sanify_filename(filename)
-    print "Downloading: %s" % filename
-    if vpath.endswith(".flv"):
-        vpath = vpath[:-4]
+    logging.info("Downloading: %s", filename)
+
+    video_url = "hds://" + video_url
+    if pvswf:
+        param = "%s pvswf=%s" % (video_url, pvswf)
+    else:
+        param = video_url
+
     cmd = [
-        "rtmpdump",
+        streamlink,
+        "-f",
         "-o", filename,
-        "-r", vbase,
-        "-y", vpath,
+        param,
+        "best",
     ]
-    if hash_url is not None:
-        cmd += ["--swfVfy", hash_url]
     if exec_subprocess(cmd):
         return convert_to_mp4(filename)
     else:
         return False
 
-def download_urllib(filename, url, referrer=None):
-    filename = sanify_filename(filename)
-    print "Downloading: %s" % filename
-    try:
-        src = _urlopen(url, referrer)
-        dst = open(filename, "wb")
-        while True:
-            buf = src.read(1024*1024)
-            if not buf:
-                break
-            dst.write(buf)
-            sys.stdout.write(".")
-            sys.stdout.flush()
-        print
-    except KeyboardInterrupt:
-        print "\nCancelled", url
-        return False
-    finally:
-        try:
-            src.close()
-        except:
-            pass
-        try:
-            dst.close()
-        except:
-            pass
-
-    return convert_to_mp4(filename)
-
-def download_hls_get_stream(url):
-    def parse_bandwidth(line):
-        params = line.split(":", 1)[1].split(",")
-        for kv in params:
-            k, v = kv.split("=", 1)
-            if k == "BANDWIDTH":
-                return int(v)
-        return 0
-
-    m3u8 = grab_text(url, 0)
-    best_bandwidth = None
-    best_url = None
-    for line in m3u8.split("\n"):
-        if line.startswith("#EXT-X-STREAM-INF:"):
-            bandwidth = parse_bandwidth(line)
-            if best_bandwidth is None or bandwidth > best_bandwidth:
-                best_bandwidth = bandwidth
-                best_url = None
-        elif not line.startswith("#"):
-            if best_url is None:
-                best_url = line.strip()
-
-    if not best_url:
-        raise Exception("Failed to find best stream for HLS: " + url)
-
-    return best_url
-
-def download_hls_segments(outf, url):
-    m3u8 = grab_text(url, 0)
-
-    fail_if_not_last_segment = None
-    for line in m3u8.split("\n"):
-        if not line.strip() or line.startswith("#"):
-            continue
+def download_hls(filename, video_url):
+    streamlink = find_streamlink()
 
-        if fail_if_not_last_segment:
-            raise e
-
-        try:
-            download_hls_fetch_segment(outf, line)
-        except urllib2.HTTPError, e:
-            fail_if_not_last_segment = e
-            continue
-        sys.stdout.write(".")
-        sys.stdout.flush()
+    filename = sanify_filename(filename)
+    video_url = "hlsvariant://" + video_url
+    logging.info("Downloading: %s", filename)
 
-    sys.stdout.write("\n")
+    cmd = [
+        streamlink,
+        "-f",
+        "-o", filename,
+        video_url,
+        "best",
+    ]
+    if exec_subprocess(cmd):
+        return convert_to_mp4(filename)
+    else:
+        return False
 
-def download_hls_fetch_segment(outf, segment_url):
-    try:
-        src = _urlopen(segment_url)
-        shutil.copyfileobj(src, outf)
-    except:
-        raise
-    finally:
-        try:
-            src.close()
-        except:
-            pass
+def download_mpd(filename, video_url):
+    streamlink = find_streamlink()
 
-def download_hls(filename, m3u8_master_url, hack_url_func=None):
     filename = sanify_filename(filename)
-    print "Downloading: %s" % filename
+    video_url = "dash://" + video_url
+    logging.info("Downloading: %s", filename)
 
-    if hack_url_func is None:
-        hack_url_func = lambda url: url
+    cmd = [
+        streamlink,
+        "-f",
+        "-o", filename,
+        video_url,
+        "best",
+    ]
+    if exec_subprocess(cmd):
+        return convert_to_mp4(filename)
+    else:
+        return False
 
-    tmpdir = tempfile.mkdtemp(prefix="webdl-hls")
+def download_http(filename, video_url):
+    filename = sanify_filename(filename)
+    logging.info("Downloading: %s", filename)
 
-    try:
-        best_stream_url = download_hls_get_stream(hack_url_func(m3u8_master_url))
-        ts_file = open(filename, "wb")
-        download_hls_segments(ts_file, hack_url_func(best_stream_url))
-    except KeyboardInterrupt:
-        print "\nCancelled", m3u8_master_url
+    cmd = [
+        "curl",
+        "--fail", "--retry", "3",
+        "-o", filename,
+        video_url,
+    ]
+    if exec_subprocess(cmd):
+        return convert_to_mp4(filename)
+    else:
         return False
-    finally:
-        shutil.rmtree(tmpdir)
-        try:
-            ts_file.close()
-        except:
-            pass
-
-    return convert_to_mp4(filename)
 
 def natural_sort(l, key=None):
     ignore_list = ["a", "the"]
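The rtmpdump and manual HLS code paths are replaced by shelling out to streamlink (or livestreamer), which selects the best variant and writes a single file that is then remuxed to mp4. A sketch of what download_hls() ends up running, assuming a hypothetical manifest URL:

    if download_hls("Example Episode.ts", "https://cdn.example.com/master.m3u8"):
        print("downloaded and remuxed to mp4")
    # under the hood this runs roughly:
    #   streamlink -f -o "Example Episode.ts" hlsvariant://https://cdn.example.com/master.m3u8 best
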
@@ -374,7 +351,7 @@ def natural_sort(l, key=None):
         for c in re.split("([0-9]+)", k):
             c = c.strip()
             if c.isdigit():
-                newk.append(int(c))
+                newk.append(c.zfill(5))
             else:
                 for subc in c.split():
                     if subc not in ignore_list:
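Zero-padding the digit runs instead of converting them to int keeps every element of the sort key a string, which Python 3 requires for comparison, while still ordering numbers numerically. A simplified illustration of the padded-key idea (the real key_func also lowercases and drops articles):

    import re

    titles = ["Episode 10", "Episode 2"]
    key = lambda k: [c.zfill(5) for c in re.split("([0-9]+)", k)]
    print(sorted(titles, key=key))    # ['Episode 2', 'Episode 10']
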
@@ -384,14 +361,14 @@ def natural_sort(l, key=None):
     return sorted(l, key=key_func)
 
 def append_to_qs(url, params):
-    r = list(urlparse.urlsplit(url))
-    qs = urlparse.parse_qs(r[3])
-    for k, v in params.iteritems():
+    r = list(urllib.parse.urlsplit(url))
+    qs = urllib.parse.parse_qs(r[3])
+    for k, v in params.items():
         if v is not None:
             qs[k] = v
-        elif qs.has_key(k):
+        elif k in qs:
             del qs[k]
-    r[3] = urllib.urlencode(qs, True)
-    url = urlparse.urlunsplit(r)
+    r[3] = urllib.parse.urlencode(sorted(qs.items()), True)
+    url = urllib.parse.urlunsplit(r)
     return url
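
append_to_qs() now sorts the query parameters before re-encoding, so the rewritten URL is deterministic regardless of dict iteration order. A worked example with illustrative values:

    url = append_to_qs("http://example.com/api?b=2&a=1", {"c": "3", "b": None})
    # "b" is deleted (None value), "c" is added, keys are sorted on output:
    # http://example.com/api?a=1&c=3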