]> code.delx.au - webdl/blobdiff - sbs.py
Fixed iView downloads
[webdl] / sbs.py
diff --git a/sbs.py b/sbs.py
index 1d318487352e0983f89386a35af0f543a4045e75..c3cbe528086e947dc3dc0934e2895b95ff6a1e36 100644 (file)
--- a/sbs.py
+++ b/sbs.py
# Scraper for SBS On Demand.
# Standard library first, then third-party, then project-local imports.
import json
import logging
import os
import sys

import requests_cache

from common import grab_html, grab_json, grab_xml, download_hls, download_mpd, Node, append_to_qs

BASE = "https://www.sbs.com.au"
# Feed listing every program; paged via the "range" query parameter.
FULL_VIDEO_LIST = BASE + "/api/video_feed/f/Bgtm9B/sbs-section-programs/"
# Player page for a single video; %s is the video id.
VIDEO_URL = BASE + "/ondemand/video/single/%s"

# XML namespace map for parsing SMIL release documents.
NS = {
    "smil": "http://www.w3.org/2005/SMIL21/Language",
}
class SbsVideoNode(Node):
    """A leaf node representing one downloadable SBS On Demand video."""

    def __init__(self, title, parent, url):
        Node.__init__(self, title, parent)
        # The video id is the last path component of the URL/id string.
        self.video_id = url.split("/")[-1]
        self.can_download = True

    def download(self):
        """Download this video; returns False (or raises) on failure."""
        # Bypass the request cache: the player page embeds per-request state.
        with requests_cache.disabled():
            doc = grab_html(VIDEO_URL % self.video_id)
        player_params = self.get_player_params(doc)

        error = player_params.get("error", None)
        if error:
            print("Cannot download:", error)
            return False

        release_url = player_params["releaseUrls"]["html"]
        filename = self.title + ".ts"

        # Prefer HLS when the SMIL release document provides a stream URL,
        # otherwise fall back to MPEG-DASH using the release URL directly.
        hls_url = self.get_hls_url(release_url)
        if hls_url:
            return download_hls(filename, hls_url)
        else:
            return download_mpd(filename, release_url)

    def get_player_params(self, doc):
        """Extract the JSON "playerParams" object embedded in a page script.

        Raises if no script line contains the playerParams assignment.
        """
        # Loop-invariant marker; keep the brace so the JSON slice starts at "{".
        marker = "var playerParams = {"
        for script in doc.xpath("//script"):
            if not script.text:
                continue
            for line in script.text.split("\n"):
                if marker in line:
                    # Slice from the opening "{" through the matching "};".
                    start = line.find(marker) + len(marker) - 1
                    end = line.find("};", start) + 1
                    if end > 0:
                        return json.loads(line[start:end])
        raise Exception("Unable to find player params for %s: %s" % (self.video_id, self.title))

    def get_hls_url(self, release_url):
        """Resolve the SMIL release document to a stream URL, or None."""
        with requests_cache.disabled():
            # Force https regardless of the scheme (or lack of one) in release_url.
            doc = grab_xml("https:" + release_url.replace("http:", "").replace("https:", ""))
            video = doc.xpath("//smil:video", namespaces=NS)
            if not video:
                return
            video_url = video[0].attrib["src"]
            return video_url
+
class SbsNavNode(Node):
    """A navigation (category) node holding videos and sub-categories."""

    def create_video_node(self, entry_data):
        # The Node constructor links the child into this node's children,
        # so the reference does not need to be kept.
        SbsVideoNode(entry_data["title"], self, entry_data["id"])

    def find_existing_child(self, path):
        """Return the child whose title equals path, or None if absent."""
        for child in self.children:
            if child.title == path:
                return child
+
class SbsRootNode(SbsNavNode):
    """Root of the SBS tree; fetches the full video list and builds categories."""

    def __init__(self, parent):
        Node.__init__(self, "SBS", parent)

    def fill_children(self):
        all_video_entries = self.load_all_video_entries()
        category_and_entry_data = self.explode_videos_to_unique_categories(all_video_entries)
        for category_path, entry_data in category_and_entry_data:
            nav_node = self.create_nav_node(self, category_path)
            nav_node.create_video_node(entry_data)

    def load_all_video_entries(self):
        """Fetch every video entry, deduplicated by guid, as a list."""
        # Fetch per-channel so each paged result set stays small; see the
        # duplicate-response issue referenced below.
        channels = [
            "Channel/SBS1",
            "Channel/SBS Food",
            "Channel/SBS VICELAND",
            "Channel/SBS World Movies",
            "Channel/Web Exclusive",
        ]

        all_entries = {}
        for channel in channels:
            self.load_all_video_entries_for_channel(all_entries, channel)

        all_entries = list(all_entries.values())
        print(" SBS fetched", len(all_entries))
        return all_entries

    def load_all_video_entries_for_channel(self, all_entries, channel):
        """Page through one channel's feed, accumulating entries into all_entries."""
        offset = 1
        page_size = 500
        duplicate_warning = False

        while True:
            entries = self.fetch_entries_page(channel, offset, page_size)
            if not entries:
                break

            for entry in entries:
                guid = entry["guid"]
                # BUG FIX: was `guid in entries` — a guid string tested against
                # a list of entry dicts, which can never match, so the warning
                # below never fired. Test against the accumulated dict instead.
                # NOTE(review): entries shared across channels may also trigger
                # this warning — confirm whether cross-channel overlap occurs.
                if guid in all_entries and not duplicate_warning:
                    # https://bitbucket.org/delx/webdl/issues/102/recent-sbs-series-missing
                    # logging.warn is a deprecated alias of logging.warning.
                    logging.warning("SBS returned a duplicate response, data is probably missing. Try decreasing page_size.")
                    duplicate_warning = True

                all_entries[guid] = entry

            offset += page_size
            # Progress dots, but only when attached to a terminal.
            if os.isatty(sys.stdout.fileno()):
                sys.stdout.write(".")
                sys.stdout.flush()

    def fetch_entries_page(self, channel, offset, page_size):
        """Fetch one page of the feed for a channel; returns the entry list."""
        url = append_to_qs(FULL_VIDEO_LIST, {
            "range": "%s-%s" % (offset, offset + page_size - 1),
            "byCategories": channel,
        })
        data = grab_json(url)
        if "entries" not in data:
            raise Exception("Missing data in SBS response", data)
        return data["entries"]

    def explode_videos_to_unique_categories(self, all_video_entries):
        """Yield (category_path, entry) for every category of every entry."""
        for entry_data in all_video_entries:
            for category_data in entry_data["media$categories"]:
                category_path = self.calculate_category_path(
                    category_data["media$scheme"],
                    category_data["media$name"],
                )
                if category_path:
                    yield category_path, entry_data

    def calculate_category_path(self, scheme, name):
        """Return the category path as a list of titles, or None to skip.

        The scheme becomes the first path component unless the name already
        starts with it (e.g. scheme "Film", name "Film/Drama").
        """
        if not scheme:
            return
        if scheme == name:
            return
        name = name.split("/")
        if name[0] != scheme:
            name.insert(0, scheme)
        return name

    def create_nav_node(self, parent, category_path):
        """Walk/create nav nodes along category_path; returns the deepest node."""
        if not category_path:
            return parent

        current_path = category_path[0]
        current_node = parent.find_existing_child(current_path)
        if not current_node:
            current_node = SbsNavNode(current_path, parent)
        return self.create_nav_node(current_node, category_path[1:])
 
def fill_nodes(root_node):
    """Entry point called by the webdl framework to attach the SBS tree."""
    # The constructor registers itself as a child of root_node.
    SbsRootNode(root_node)