-# vim:ts=4:sts=4:sw=4:noet
-
-from lxml import etree, html
+import hashlib
+import io
import json
-try:
- import hashlib
-except ImportError:
- import md5 as hashlib
+import logging
+import lxml.etree
+import lxml.html
import os
+import re
+import requests
+import requests_cache
import shutil
import signal
import subprocess
import sys
-import tempfile
import time
-import urllib
+import urllib.parse
+
+USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0"
+
+try:
+ import autosocks
+ autosocks.try_autosocks()
+except ImportError:
+ pass
-import autosocks
-autosocks.try_autosocks()
+logging.basicConfig(
+ format = "%(levelname)s %(message)s",
+ level = logging.INFO if os.environ.get("DEBUG", None) is None else logging.DEBUG,
+ stream = sys.stdout,
+)
+
+CACHE_FILE = os.path.join(
+ os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")),
+ "webdl",
+ "requests_cache"
+)
+if not os.path.isdir(os.path.dirname(CACHE_FILE)):
+ os.makedirs(os.path.dirname(CACHE_FILE))
+
+requests_cache.install_cache(CACHE_FILE, backend='sqlite', expire_after=3600)
-CACHE_DIR = os.path.expanduser("~/.cache/webdl")
class Node(object):
- def __init__(self, title, parent=None):
- self.title = title
- if parent:
- parent.children.append(self)
- self.parent = parent
- self.children = []
- self.can_download = False
+ def __init__(self, title, parent=None):
+ self.title = title
+ if parent:
+ parent.children.append(self)
+ self.parent = parent
+ self.children = []
+ self.can_download = False
- def get_children(self):
- if not self.children:
- self.fill_children()
- return self.children
+ def get_children(self):
+ if not self.children:
+ self.fill_children()
+ self.children = natural_sort(self.children, key=lambda node: node.title)
+ return self.children
- def fill_children(self):
- pass
+ def fill_children(self):
+ pass
- def download(self):
- raise NotImplemented
+ def download(self):
+		raise NotImplementedError
def load_root_node():
- root_node = Node("Root")
+ root_node = Node("Root")
+
+ import iview
+ iview.fill_nodes(root_node)
- import iview
- iview.fill_nodes(root_node)
+ import sbs
+ sbs.fill_nodes(root_node)
- import sbs
- sbs.fill_nodes(root_node)
+ import ten
+ ten.fill_nodes(root_node)
- return root_node
+ return root_node
-valid_chars = frozenset("-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
+valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def sanify_filename(filename):
- filename = filename.encode("ascii", "ignore")
filename = "".join(c for c in filename if c in valid_chars)
+ assert len(filename) > 0
return filename
+def ensure_scheme(url):
+ parts = urllib.parse.urlparse(url)
+ if parts.scheme:
+ return url
+ parts = list(parts)
+ parts[0] = "http"
+ return urllib.parse.urlunparse(parts)
+
+http_session = requests.Session()
+http_session.headers["User-Agent"] = USER_AGENT
+
+def grab_text(url):
+ logging.debug("grab_text(%r)", url)
+ request = http_session.prepare_request(requests.Request("GET", url))
+ response = http_session.send(request)
+ return response.text
+
+def grab_html(url):
+ logging.debug("grab_html(%r)", url)
+ request = http_session.prepare_request(requests.Request("GET", url))
+ response = http_session.send(request, stream=True)
+ doc = lxml.html.parse(io.BytesIO(response.content), lxml.html.HTMLParser(encoding="utf-8", recover=True))
+ response.close()
+ return doc
-def urlopen(url, max_age):
-### print url
- if not os.path.isdir(CACHE_DIR):
- os.makedirs(CACHE_DIR)
-
- if max_age <= 0:
- return urllib.urlopen(url)
-
- filename = hashlib.md5(url).hexdigest()
- filename = os.path.join(CACHE_DIR, filename)
- if os.path.exists(filename):
- file_age = int(time.time()) - os.path.getmtime(filename)
- if file_age < max_age:
- return open(filename)
-
- src = urllib.urlopen(url)
- dst = open(filename, "w")
- try:
- shutil.copyfileobj(src, dst)
- except Exception, e:
- try:
- os.unlink(filename)
- except OSError:
- pass
- raise e
- src.close()
- dst.close()
-
- return open(filename)
-
-def grab_html(url, max_age):
- f = urlopen(url, max_age)
- doc = html.parse(f)
- f.close()
- return doc
-
-def grab_xml(url, max_age):
- f = urlopen(url, max_age)
- doc = etree.parse(f)
- f.close()
- return doc
-
-def grab_json(url, max_age):
- f = urlopen(url, max_age)
- doc = json.load(f)
- f.close()
- return doc
+def grab_xml(url):
+ logging.debug("grab_xml(%r)", url)
+ request = http_session.prepare_request(requests.Request("GET", url))
+ response = http_session.send(request, stream=True)
+ doc = lxml.etree.parse(io.BytesIO(response.content), lxml.etree.XMLParser(encoding="utf-8", recover=True))
+ response.close()
+ return doc
+
+def grab_json(url):
+ logging.debug("grab_json(%r)", url)
+ request = http_session.prepare_request(requests.Request("GET", url))
+ response = http_session.send(request)
+ return response.json()
def exec_subprocess(cmd):
- try:
- p = subprocess.Popen(cmd)
- ret = p.wait()
- if ret != 0:
- print >>sys.stderr, cmd[0], "exited with error code:", ret
- return False
- else:
- return True
- except OSError, e:
- print >>sys.stderr, "Failed to run", cmd[0], e
- except KeyboardInterrupt:
- print "Cancelled", cmd
- try:
- p.terminate()
- p.wait()
- except KeyboardInterrupt:
- p.send_signal(signal.SIGKILL)
- p.wait()
- return False
-
-
-def convert_flv_mp4(orig_filename):
- basename = os.path.splitext(orig_filename)[0]
- flv_filename = basename + ".flv"
- mp4_filename = basename + ".mp4"
- os.rename(orig_filename, flv_filename)
- print "Converting %s to mp4" % flv_filename
- cmd = [
- "ffmpeg",
- "-i", flv_filename,
- "-acodec", "copy",
- "-vcodec", "copy",
- mp4_filename,
- ]
- if not exec_subprocess(cmd):
- return
- try:
- flv_size = os.stat(flv_filename).st_size
- mp4_size = os.stat(mp4_filename).st_size
- if abs(flv_size - mp4_size) < 0.05 * flv_size:
- os.unlink(flv_filename)
- else:
- print >>sys.stderr, "The size of", mp4_filename, "is suspicious, did ffmpeg fail?"
- except Exception, e:
- print "Conversion failed", e
-
-def convert_filename(filename):
- if filename.lower().endswith(".mp4"):
- f = open(filename)
- fourcc = f.read(4)
- f.close()
- if fourcc == "FLV\x01":
- convert_flv_mp4(filename)
-
-def download_rtmp(filename, vbase, vpath, hash_url=None):
- filename = sanify_filename(filename)
- print "Downloading: %s" % filename
- if vpath.endswith(".flv"):
- vpath = vpath[:-4]
- cmd = [
- "rtmpdump",
- "-o", filename,
- "-r", vbase,
- "-y", vpath,
- ]
- if hash_url is not None:
- cmd += ["--swfVfy", hash_url]
- success = exec_subprocess(cmd)
- convert_filename(filename)
- return success
-
-def download_urllib(filename, url):
- filename = sanify_filename(filename)
- print "Downloading: %s" % filename
- try:
- src = urllib.urlopen(url)
- dst = open(filename, "w")
- while True:
- buf = src.read(1024*1024)
- if not buf:
- break
- dst.write(buf)
- sys.stdout.write(".")
- sys.stdout.flush()
- convert_filename(filename)
- return True
- except KeyboardInterrupt:
- print "\nCancelled", url
- finally:
- try:
- src.close()
- except:
- pass
- try:
- dst.close()
- except:
- pass
- return False
+ logging.debug("Executing: %s", cmd)
+ try:
+ p = subprocess.Popen(cmd)
+ ret = p.wait()
+ if ret != 0:
+ logging.error("%s exited with error code: %s", cmd[0], ret)
+ return False
+ else:
+ return True
+ except OSError as e:
+ logging.error("Failed to run: %s -- %s", cmd[0], e)
+ except KeyboardInterrupt:
+ logging.info("Cancelled: %s", cmd)
+ try:
+ p.terminate()
+ p.wait()
+ except KeyboardInterrupt:
+ p.send_signal(signal.SIGKILL)
+ p.wait()
+ return False
+
+
+def check_command_exists(cmd):
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ return True
+ except Exception:
+ return False
+
+def find_ffmpeg():
+ if check_command_exists(["ffmpeg", "--help"]):
+ return "ffmpeg"
+
+ if check_command_exists(["avconv", "--help"]):
+		logging.warning("Detected libav-tools! ffmpeg is recommended")
+ return "avconv"
+
+ raise Exception("You must install ffmpeg or libav-tools")
+
+def find_ffprobe():
+ if check_command_exists(["ffprobe", "--help"]):
+ return "ffprobe"
+
+ if check_command_exists(["avprobe", "--help"]):
+		logging.warning("Detected libav-tools! ffmpeg is recommended")
+ return "avprobe"
+
+ raise Exception("You must install ffmpeg or libav-tools")
+
+def get_duration(filename):
+ ffprobe = find_ffprobe()
+
+ cmd = [
+ ffprobe,
+ filename,
+ "-show_format_entry", "duration",
+ "-v", "quiet",
+ ]
+ output = subprocess.check_output(cmd).decode("utf-8")
+ for line in output.split("\n"):
+ m = re.search(R"([0-9]+)", line)
+ if not m:
+ continue
+ duration = m.group(1)
+ if duration.isdigit():
+ return int(duration)
+
+
+	logging.debug("Falling back to full decode to find duration: %s", filename)
+
+ ffmpeg = find_ffmpeg()
+ cmd = [
+ ffmpeg,
+ "-i", filename,
+ "-vn",
+ "-f", "null", "-",
+ ]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ duration = None
+ for line in re.split(R"[\r\n]", output):
+ m = re.search(R"time=([0-9:]*)\.", line)
+ if not m:
+ continue
+ [h, m, s] = m.group(1).split(":")
+ # ffmpeg prints the duration as it reads the file, we want the last one
+ duration = int(h) * 3600 + int(m) * 60 + int(s)
+
+ if duration:
+ return duration
+ else:
+ raise Exception("Unable to determine video duration of " + filename)
+
+def check_video_durations(flv_filename, mp4_filename):
+ flv_duration = get_duration(flv_filename)
+ mp4_duration = get_duration(mp4_filename)
+
+ if abs(flv_duration - mp4_duration) > 1:
+ logging.error(
+ "The duration of %s is suspicious, did the remux fail? Expected %s == %s",
+ mp4_filename, flv_duration, mp4_duration
+ )
+ return False
+
+ return True
+
+def remux(infile, outfile):
+ logging.info("Converting %s to mp4", infile)
+
+ ffmpeg = find_ffmpeg()
+ cmd = [
+ ffmpeg,
+ "-i", infile,
+ "-bsf:a", "aac_adtstoasc",
+ "-acodec", "copy",
+ "-vcodec", "copy",
+ "-y",
+ outfile,
+ ]
+ if not exec_subprocess(cmd):
+ return False
+
+ if not check_video_durations(infile, outfile):
+ return False
+
+ os.unlink(infile)
+ return True
+
+def convert_to_mp4(filename):
+ with open(filename, "rb") as f:
+ fourcc = f.read(4)
+ basename, ext = os.path.splitext(filename)
+
+ if ext == ".mp4" and fourcc == b"FLV\x01":
+ os.rename(filename, basename + ".flv")
+ ext = ".flv"
+ filename = basename + ext
+
+ if ext in (".flv", ".ts"):
+ filename_mp4 = basename + ".mp4"
+ return remux(filename, filename_mp4)
+
+ return ext == ".mp4"
+
+
+def download_hds(filename, video_url, pvswf=None):
+ filename = sanify_filename(filename)
+ logging.info("Downloading: %s", filename)
+
+ video_url = "hds://" + video_url
+ if pvswf:
+ param = "%s pvswf=%s" % (video_url, pvswf)
+ else:
+ param = video_url
+
+ cmd = [
+ "streamlink",
+ "--force",
+ "--output", filename,
+ param,
+ "best",
+ ]
+ if exec_subprocess(cmd):
+ return convert_to_mp4(filename)
+ else:
+ return False
+
+def download_hls(filename, video_url):
+ filename = sanify_filename(filename)
+ video_url = "hlsvariant://" + video_url
+ logging.info("Downloading: %s", filename)
+
+ cmd = [
+ "streamlink",
+ "--http-header", "User-Agent=" + USER_AGENT,
+ "--force",
+ "--output", filename,
+ video_url,
+ "best",
+ ]
+ if exec_subprocess(cmd):
+ return convert_to_mp4(filename)
+ else:
+ return False
+
+def download_mpd(filename, video_url):
+ filename = sanify_filename(filename)
+ video_url = "dash://" + video_url
+ logging.info("Downloading: %s", filename)
+
+ cmd = [
+ "streamlink",
+ "--force",
+ "--output", filename,
+ video_url,
+ "best",
+ ]
+ if exec_subprocess(cmd):
+ return convert_to_mp4(filename)
+ else:
+ return False
+
+def download_http(filename, video_url):
+ filename = sanify_filename(filename)
+ logging.info("Downloading: %s", filename)
+
+ cmd = [
+ "curl",
+ "--fail", "--retry", "3",
+ "-o", filename,
+ video_url,
+ ]
+ if exec_subprocess(cmd):
+ return convert_to_mp4(filename)
+ else:
+ return False
+
+def natural_sort(l, key=None):
+ ignore_list = ["a", "the"]
+ def key_func(k):
+ if key is not None:
+ k = key(k)
+ k = k.lower()
+ newk = []
+ for c in re.split("([0-9]+)", k):
+ c = c.strip()
+ if c.isdigit():
+ newk.append(c.zfill(5))
+ else:
+ for subc in c.split():
+ if subc not in ignore_list:
+ newk.append(subc)
+ return newk
+
+ return sorted(l, key=key_func)
+
+def append_to_qs(url, params):
+ r = list(urllib.parse.urlsplit(url))
+ qs = urllib.parse.parse_qs(r[3])
+ for k, v in params.items():
+ if v is not None:
+ qs[k] = v
+ elif k in qs:
+ del qs[k]
+ r[3] = urllib.parse.urlencode(sorted(qs.items()), True)
+ url = urllib.parse.urlunsplit(r)
+ return url