-#!/usr/bin/env python
-# vim:ts=4:sts=4:sw=4:noet
+import requests_cache
+from common import grab_html, grab_json, grab_xml, download_hls, download_mpd, Node, append_to_qs
-from common import grab_json, grab_xml, download_rtmp, Node
+import json
+import logging
+import os
+import sys
-import collections
-
-BASE = "http://www.sbs.com.au"
-MENU_URL = "/api/video_feed/f/dYtmxB/%s?startIndex=%d"
-VIDEO_URL = BASE + "/api/video_feed/f/dYtmxB/CxeOeDELXKEv/%s?form=json"
+BASE = "https://www.sbs.com.au"
+FULL_VIDEO_LIST = BASE + "/api/video_feed/f/Bgtm9B/sbs-section-programs/"
+VIDEO_URL = BASE + "/ondemand/video/single/%s"
+PARAMS_URL = BASE + "/api/video_pdkvars/id/%s?form=json"
NS = {
- "smil": "http://www.w3.org/2005/SMIL21/Language",
-}
-
-SECTIONS = [
- "section-sbstv",
- "section-programs",
-]
-
-CATEGORY_MAP = {
- "Factual": "Documentary",
+ "smil": "http://www.w3.org/2005/SMIL21/Language",
}
-class SbsNode(Node):
- def __init__(self, title, parent, video_id):
- Node.__init__(self, title, parent)
- self.title = title
- self.video_id = video_id.split("/")[-1]
- self.can_download = True
-
- def download(self):
- doc = grab_json(VIDEO_URL % self.video_id, 0)
- best_url = None
- best_bitrate = 0
- for d in doc["media$content"]:
- bitrate = d["plfile$bitrate"]
- if bitrate > best_bitrate or best_url is None:
- best_bitrate = bitrate
- best_url = d["plfile$url"]
-
- doc = grab_xml(best_url, 3600)
- vbase = doc.xpath("//smil:meta/@base", namespaces=NS)[0]
- vpath = doc.xpath("//smil:video/@src", namespaces=NS)[0]
- ext = vpath.rsplit(".", 1)[1]
- filename = self.title + "." + ext
-
- return download_rtmp(filename, vbase, vpath)
-
-def fill_entry(get_catnode, entry):
- title = entry["title"]
- if title.find("sneak peek") >= 0:
- print entry
- video_id = entry["id"]
- info = collections.defaultdict(list)
- for d in entry["media$categories"]:
- if not d.has_key("media$scheme"):
- continue
- info[d["media$scheme"]].append(d["media$name"])
-
- if "Section/Promos" in info.get("Section", []):
- # ignore promos
- return
-
- for category in info.get("Genre", ["$UnknownCategory$"]):
- category = CATEGORY_MAP.get(category, category)
- parent_node = get_catnode(category)
- SbsNode(title, parent_node, video_id)
-
-
-def fill_section(get_catnode, section):
- index = 1
- while True:
- doc = grab_json(BASE + MENU_URL % (section, index), 3600)
- if len(doc.get("entries", [])) == 0:
- break
- for entry in doc["entries"]:
- fill_entry(get_catnode, entry)
- index += doc["itemsPerPage"]
+class SbsVideoNode(Node):
+    # Leaf node for a single SBS On Demand video; downloading is done by
+    # resolving the player parameters to an HLS (preferred) or MPD stream.
+    def __init__(self, title, parent, url):
+        Node.__init__(self, title, parent)
+        # The video id is the last path component of the entry's URL/id field.
+        self.video_id = url.split("/")[-1]
+        self.can_download = True
+
+    def download(self):
+        # Bypass the HTTP cache: player params are per-session and must be fresh.
+        with requests_cache.disabled():
+            doc = grab_html(VIDEO_URL % self.video_id)
+            # NOTE(review): `doc` is never used below — presumably the page
+            # fetch establishes session cookies needed by the params API; confirm.
+            player_params = grab_json(PARAMS_URL % self.video_id)
+
+        error = player_params.get("error", None)
+        if error:
+            print("Cannot download:", error)
+            return False
+
+        release_url = player_params["releaseUrls"]["html"]
+        filename = self.title + ".ts"
+
+        # Prefer HLS when the SMIL document advertises a video src;
+        # otherwise fall back to DASH/MPD at the release URL itself.
+        hls_url = self.get_hls_url(release_url)
+        if hls_url:
+            return download_hls(filename, hls_url)
+        else:
+            return download_mpd(filename, release_url)
+
+    def get_hls_url(self, release_url):
+        # Returns the stream src from the SMIL playlist, or None if absent.
+        with requests_cache.disabled():
+            # Force https regardless of whether release_url is http:, https:,
+            # or protocol-relative (//host/...).
+            doc = grab_xml("https:" + release_url.replace("http:", "").replace("https:", ""))
+            video = doc.xpath("//smil:video", namespaces=NS)
+            if not video:
+                return
+            video_url = video[0].attrib["src"]
+            return video_url
+
+class SbsNavNode(Node):
+    # Intermediate navigation node (a category level in the SBS tree).
+    def create_video_node(self, entry_data):
+        # Side-effect constructor: SbsVideoNode attaches itself to `self`
+        # via the parent argument, so the return value is not needed.
+        SbsVideoNode(entry_data["title"], self, entry_data["id"])
+
+    def find_existing_child(self, path):
+        # Linear scan by title; returns None when no child matches,
+        # which callers use as the "need to create it" signal.
+        for child in self.children:
+            if child.title == path:
+                return child
+
+class SbsRootNode(SbsNavNode):
+    # Root of the SBS subtree: fetches the full programme feed per channel
+    # and fans each entry out into one node per category it belongs to.
+    def __init__(self, parent):
+        Node.__init__(self, "SBS", parent)
+
+    def fill_children(self):
+        all_video_entries = self.load_all_video_entries()
+        category_and_entry_data = self.explode_videos_to_unique_categories(all_video_entries)
+        for category_path, entry_data in category_and_entry_data:
+            nav_node = self.create_nav_node(self, category_path)
+            nav_node.create_video_node(entry_data)
+
+    def load_all_video_entries(self):
+        channels = [
+            "Channel/SBS1",
+            "Channel/SBS Food",
+            "Channel/SBS VICELAND",
+            "Channel/SBS World Movies",
+            "Channel/Web Exclusive",
+        ]
+
+        # Deduplicate across channels/pages by guid.
+        all_entries = {}
+        for channel in channels:
+            self.load_all_video_entries_for_channel(all_entries, channel)
+
+        all_entries = list(all_entries.values())
+        print(" SBS fetched", len(all_entries))
+        return all_entries
+
+    def load_all_video_entries_for_channel(self, all_entries, channel):
+        offset = 1
+        page_size = 500
+        duplicate_warning = False
+
+        while True:
+            entries = self.fetch_entries_page(channel, offset, page_size)
+            if len(entries) == 0:
+                break
+
+            for entry in entries:
+                guid = entry["guid"]
+                # BUGFIX: was `guid in entries` — a string tested against a
+                # list of entry dicts, which is always False, so the warning
+                # below could never fire. Check the guid-keyed accumulator.
+                if guid in all_entries and not duplicate_warning:
+                    # https://bitbucket.org/delx/webdl/issues/102/recent-sbs-series-missing
+                    # logging.warn is a deprecated alias of logging.warning.
+                    logging.warning("SBS returned a duplicate response, data is probably missing. Try decreasing page_size.")
+                    duplicate_warning = True
+
+                all_entries[guid] = entry
+
+            offset += page_size
+            # Progress dots only when attached to a terminal.
+            if os.isatty(sys.stdout.fileno()):
+                sys.stdout.write(".")
+                sys.stdout.flush()
+
+    def fetch_entries_page(self, channel, offset, page_size):
+        # The feed uses an inclusive 1-based "range" parameter, e.g. 1-500.
+        url = append_to_qs(FULL_VIDEO_LIST, {
+            "range": "%s-%s" % (offset, offset+page_size-1),
+            "byCategories": channel,
+        })
+        data = grab_json(url)
+        if "entries" not in data:
+            raise Exception("Missing data in SBS response", data)
+        return data["entries"]
+
+    def explode_videos_to_unique_categories(self, all_video_entries):
+        # Yields (category_path, entry) once per usable category of each entry.
+        for entry_data in all_video_entries:
+            for category_data in entry_data["media$categories"]:
+                category_path = self.calculate_category_path(
+                    category_data["media$scheme"],
+                    category_data["media$name"],
+                )
+                if category_path:
+                    yield category_path, entry_data
+
+    def calculate_category_path(self, scheme, name):
+        # Returns a list of path components ["Scheme", "Sub", ...],
+        # or None for unusable categories (no scheme, or scheme-only names).
+        if not scheme:
+            return
+        if scheme == name:
+            return
+        name = name.split("/")
+        if name[0] != scheme:
+            name.insert(0, scheme)
+        return name
+
+    def create_nav_node(self, parent, category_path):
+        # Recursively walk/create one SbsNavNode per path component,
+        # reusing existing children so categories merge across entries.
+        if not category_path:
+            return parent
+
+        current_path = category_path[0]
+        current_node = parent.find_existing_child(current_path)
+        if not current_node:
+            current_node = SbsNavNode(current_path, parent)
+        return self.create_nav_node(current_node, category_path[1:])
def fill_nodes(root_node):
-    catnodes = {}
-    def get_catnode(name):
-        try:
-            return catnodes[name]
-        except KeyError:
-            n = Node(name, root_node)
-            catnodes[name] = n
-            return n
-
-    for section in SECTIONS:
-        fill_section(get_catnode, section)
-
-
+    # Module entry point: attach the SBS root node to the tree. It registers
+    # itself via Node.__init__; children are presumably populated later when
+    # fill_children() is invoked by the framework — confirm against Node.
+    SbsRootNode(root_node)