Download SBS player params from the API
diff --git a/sbs.py b/sbs.py
index 619aea16044817c18a85cd41f49e84961d507097..deac6eb0de5d13716bf5dde0c8b934946063c99f 100644
--- a/sbs.py
+++ b/sbs.py
@@ -1,11 +1,15 @@
 import requests_cache
-from common import grab_html, grab_json, grab_xml, download_hls, Node, append_to_qs
+from common import grab_html, grab_json, grab_xml, download_hls, download_mpd, Node, append_to_qs
 
 import json
+import logging
+import os
+import sys
 
-BASE = "http://www.sbs.com.au"
-FULL_VIDEO_LIST = BASE + "/api/video_search/v2/?m=1&filters={section}{Programs}"
+BASE = "https://www.sbs.com.au"
+FULL_VIDEO_LIST = BASE + "/api/video_feed/f/Bgtm9B/sbs-section-programs/"
 VIDEO_URL = BASE + "/ondemand/video/single/%s"
+PARAMS_URL = BASE + "/api/video_pdkvars/id/%s?form=json"
 
 NS = {
     "smil": "http://www.w3.org/2005/SMIL21/Language",
@@ -21,31 +25,30 @@ class SbsVideoNode(Node):
     def download(self):
         with requests_cache.disabled():
             doc = grab_html(VIDEO_URL % self.video_id)
-        player_params = self.get_player_params(doc)
-        release_url = player_params["releaseUrls"]["html"]
+        player_params = grab_json(PARAMS_URL % self.video_id)
 
-        with requests_cache.disabled():
-            doc = grab_xml(release_url if not release_url.startswith("//") else "http:" + release_url)
-        video = doc.xpath("//smil:video", namespaces=NS)[0]
-        video_url = video.attrib["src"]
-        if not video_url:
-            raise Exception("Unsupported video %s: %s" % (self.video_id, self.title))
+        error = player_params.get("error", None)
+        if error:
+            print("Cannot download:", error)
+            return False
+
+        release_url = player_params["releaseUrls"]["html"]
         filename = self.title + ".ts"
-        return download_hls(filename, video_url)
-
-    def get_player_params(self, doc):
-        for script in doc.xpath("//script"):
-            if not script.text:
-                continue
-            for line in script.text.split("\n"):
-                s = "var playerParams = {"
-                if s in line:
-                    p1 = line.find(s) + len(s) - 1
-                    p2 = line.find("};", p1) + 1
-                    if p1 >= 0 and p2 > 0:
-                        return json.loads(line[p1:p2])
-        raise Exception("Unable to find player params for %s: %s" % (self.video_id, self.title))
 
+        hls_url = self.get_hls_url(release_url)
+        if hls_url:
+            return download_hls(filename, hls_url)
+        else:
+            return download_mpd(filename, release_url)
+
+    def get_hls_url(self, release_url):
+        with requests_cache.disabled():
+            doc = grab_xml("https:" + release_url.replace("http:", "").replace("https:", ""))
+            video = doc.xpath("//smil:video", namespaces=NS)
+            if not video:
+                return
+            video_url = video[0].attrib["src"]
+            return video_url
 
 class SbsNavNode(Node):
     def create_video_node(self, entry_data):
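The HLS-versus-MPD choice above turns on whether the SMIL document behind the release URL advertises a video element: if it does, its src attribute is an HLS playlist; if not, download() hands the release URL to download_mpd() instead. A rough standalone sketch of that probe, assuming requests and lxml in place of webdl's grab_xml helper:

    import requests
    from lxml import etree

    NS = {"smil": "http://www.w3.org/2005/SMIL21/Language"}

    def probe_hls_url(release_url):
        # Normalise to https, mirroring get_hls_url(); the replace() pair also
        # copes with scheme-relative "//..." release URLs.
        url = "https:" + release_url.replace("http:", "").replace("https:", "")
        doc = etree.fromstring(requests.get(url, timeout=30).content)
        videos = doc.xpath("//smil:video", namespaces=NS)
        return videos[0].attrib["src"] if videos else None  # None => fall back to MPD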
@@ -68,19 +71,55 @@ class SbsRootNode(SbsNavNode):
             nav_node.create_video_node(entry_data)
 
     def load_all_video_entries(self):
+        channels = [
+            "Channel/SBS1",
+            "Channel/SBS Food",
+            "Channel/SBS VICELAND",
+            "Channel/SBS World Movies",
+            "Channel/Web Exclusive",
+        ]
+
+        all_entries = {}
+        for channel in channels:
+            self.load_all_video_entries_for_channel(all_entries, channel)
+
+        all_entries = list(all_entries.values())
+        print(" SBS fetched", len(all_entries))
+        return all_entries
+
+    def load_all_video_entries_for_channel(self, all_entries, channel):
         offset = 1
-        amount = 500
+        page_size = 500
+        duplicate_warning = False
+
         while True:
-            url = append_to_qs(FULL_VIDEO_LIST, {"range": "%s-%s" % (offset, offset+amount)})
-            data = grab_json(url)
-            if "entries" not in data:
-                raise Exception("Missing data in SBS response", data)
-            entries = data["entries"]
+            entries = self.fetch_entries_page(channel, offset, page_size)
             if len(entries) == 0:
                 break
+
             for entry in entries:
-                yield entry
-            offset += amount
+                guid = entry["guid"]
+                if guid in all_entries and not duplicate_warning:
+                    # https://bitbucket.org/delx/webdl/issues/102/recent-sbs-series-missing
+                    logging.warning("SBS returned a duplicate response, data is probably missing. Try decreasing page_size.")
+                    duplicate_warning = True
+
+                all_entries[guid] = entry
+
+            offset += page_size
+            if os.isatty(sys.stdout.fileno()):
+                sys.stdout.write(".")
+                sys.stdout.flush()
+
+    def fetch_entries_page(self, channel, offset, page_size):
+        url = append_to_qs(FULL_VIDEO_LIST, {
+            "range": "%s-%s" % (offset, offset+page_size-1),
+            "byCategories": channel,
+        })
+        data = grab_json(url)
+        if "entries" not in data:
+            raise Exception("Missing data in SBS response", data)
+        return data["entries"]
 
     def explode_videos_to_unique_categories(self, all_video_entries):
         for entry_data in all_video_entries:
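The paging above walks each channel's feed in 1-based, inclusive "range" windows (1-500, 501-1000, ...) and stops at the first empty page, de-duplicating entries by guid along the way. A minimal sketch of the URL construction, using only the standard library in place of webdl's append_to_qs:

    from urllib.parse import urlencode

    FULL_VIDEO_LIST = "https://www.sbs.com.au/api/video_feed/f/Bgtm9B/sbs-section-programs/"

    def entries_page_url(channel, offset, page_size):
        # The range is inclusive on both ends, hence the -1.
        qs = urlencode({
            "range": "%s-%s" % (offset, offset + page_size - 1),
            "byCategories": channel,
        })
        return FULL_VIDEO_LIST + "?" + qs

    # entries_page_url("Channel/SBS1", 1, 500)
    # -> ...sbs-section-programs/?range=1-500&byCategories=Channel%2FSBS1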