code.delx.au - webdl/blob - common.py

from lxml import etree, html
# ...

autosocks.try_autosocks()
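
# HTTP responses are cached under CACHE_DIR (see urlopen below), and requests
# are sent with a desktop Firefox USER_AGENT string.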
CACHE_DIR = os.path.expanduser("~/.cache/webdl")
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:21.0) Gecko/20100101 Firefox/21.0"
    def __init__(self, title, parent=None):
        # ...
        parent.children.append(self)
        # ...
        self.can_download = False

    def get_children(self):
        # ...

    def fill_children(self):
        # ...
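
# Each supported site module (iview, sbs, plus7, brightcove) attaches its own
# entries under the shared root node.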
root_node = Node("Root")
# ...
iview.fill_nodes(root_node)
# ...
sbs.fill_nodes(root_node)
# ...
plus7.fill_nodes(root_node)
# ...
brightcove.fill_nodes(root_node)
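
# sanify_filename reduces a title to a safe filename: non-ASCII bytes are
# dropped and only characters from valid_chars are kept.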
valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

def sanify_filename(filename):
    filename = filename.encode("ascii", "ignore")
    filename = "".join(c for c in filename if c in valid_chars)
    # ...
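
# All HTTP requests share one urllib2 opener with a common CookieJar; _urlopen
# adds the spoofed User-Agent and an optional Referer header.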
cookiejar = cookielib.CookieJar()
urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))

def _urlopen(url, referrer=None):
    req = urllib2.Request(url)
    req.add_header("User-Agent", USER_AGENT)
    if referrer:
        req.add_header("Referer", referrer)
    return urlopener.open(req)
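
# urlopen is the caching fetch: responses are stored in CACHE_DIR keyed by the
# MD5 of the URL and reused while younger than max_age seconds; otherwise the
# URL is re-fetched (the fetch itself is elided here) and the cache file is
# rewritten before being reopened for the caller.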
def urlopen(url, max_age):
    # ...
    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    # ...
    filename = hashlib.md5(url).hexdigest()
    filename = os.path.join(CACHE_DIR, filename)
    if os.path.exists(filename):
        file_age = int(time.time()) - os.path.getmtime(filename)
        if file_age < max_age:
            # ...
    # ...
    dst = open(filename, "w")
    # ...
    shutil.copyfileobj(src, dst)
    # ...
    return open(filename)
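
# The grab_* helpers fetch a URL through the cache and parse the body:
# grab_text decodes UTF-8, while grab_html and grab_xml use lxml parsers with
# recover=True so that slightly malformed markup still yields a document tree.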
def grab_text(url, max_age):
    f = urlopen(url, max_age)
    text = f.read().decode("utf-8")
    # ...

def grab_html(url, max_age):
    f = urlopen(url, max_age)
    doc = html.parse(f, html.HTMLParser(encoding="utf-8", recover=True))
    # ...

def grab_xml(url, max_age):
    f = urlopen(url, max_age)
    doc = etree.parse(f, etree.XMLParser(encoding="utf-8", recover=True))
    # ...
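
# grab_json parses a JSON response; judging by the slicing below, the
# skip_assignment flag strips a leading "var x = ..." assignment and
# skip_function strips a JSONP-style "callback(...)" wrapper before parsing.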
def grab_json(url, max_age, skip_assignment=False, skip_function=False):
    f = urlopen(url, max_age)
    # ...
        doc = json.loads(text[pos+1:])
    # ...
        rpos = text.rfind(")")
        doc = json.loads(text[pos+1:rpos])
    # ...
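
# exec_subprocess runs an external command, reporting failures and non-zero
# exit codes on stderr; it appears to SIGKILL the child if it is interrupted
# again while handling a first KeyboardInterrupt.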
def exec_subprocess(cmd):
    # ...
    p = subprocess.Popen(cmd)
    # ...
    print >>sys.stderr, cmd[0], "exited with error code:", ret
    # ...
    print >>sys.stderr, "Failed to run", cmd[0], e
    except KeyboardInterrupt:
        print "Cancelled", cmd
    # ...
    except KeyboardInterrupt:
        p.send_signal(signal.SIGKILL)
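
# avconv_remux runs an external remux command (its construction is elided
# here) and then sanity-checks the result: the output should be roughly the
# same size as the input, so a difference of more than about 10% of the input
# size is reported as suspicious.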
def avconv_remux(infile, outfile):
    print "Converting %s to mp4" % infile
    # ...
    if not exec_subprocess(cmd):
        # failed, error has already been logged
        # ...
    flv_size = os.stat(infile).st_size
    mp4_size = os.stat(outfile).st_size
    if abs(flv_size - mp4_size) < 0.1 * flv_size:
        # ...
    print >>sys.stderr, "The size of", outfile, "is suspicious, did avconv fail?"
    # ...
    print >>sys.stderr, "Conversion failed", e
def convert_to_mp4(filename):
    with open(filename) as f:
        # ...
    basename, ext = os.path.splitext(filename)
    # ...
    if ext == ".mp4" and fourcc == "FLV\x01":
        os.rename(filename, basename + ".flv")
        # ...
        filename = basename + ext
    # ...
    if ext in (".flv", ".ts"):
        filename_mp4 = basename + ".mp4"
        return avconv_remux(filename, filename_mp4)
    # ...
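
# download_rtmp builds an RTMP download command (its construction is elided
# here); when a hash_url is supplied it is passed as --swfVfy for SWF
# verification, and a successful download is remuxed to MP4.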
def download_rtmp(filename, vbase, vpath, hash_url=None):
    filename = sanify_filename(filename)
    print "Downloading: %s" % filename
    if vpath.endswith(".flv"):
        # ...
    # ...
    if hash_url is not None:
        cmd += ["--swfVfy", hash_url]
    if exec_subprocess(cmd):
        return convert_to_mp4(filename)
    # ...
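
# download_urllib streams a plain HTTP(S) URL to disk in 1 MB chunks, printing
# a "." per chunk as a progress indicator, and finishes by converting the
# result to MP4.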
def download_urllib(filename, url, referrer=None):
    filename = sanify_filename(filename)
    print "Downloading: %s" % filename
    # ...
    src = _urlopen(url, referrer)
    dst = open(filename, "w")
    # ...
    buf = src.read(1024*1024)
    # ...
    sys.stdout.write(".")
    # ...
    except KeyboardInterrupt:
        print "\nCancelled", url
    # ...
    return convert_to_mp4(filename)
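
# download_hls_get_stream picks the variant to download from an HLS master
# playlist: it reads the BANDWIDTH attribute of each #EXT-X-STREAM-INF entry
# and keeps the URL of the highest-bandwidth stream, raising if none is found.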
def download_hls_get_stream(url):
    def parse_bandwidth(line):
        params = line.split(":", 1)[1].split(",")
        # ...
            k, v = kv.split("=", 1)
            # ...

    m3u8 = grab_text(url, 0)
    best_bandwidth = None
    # ...
    for line in m3u8.split("\n"):
        if line.startswith("#EXT-X-STREAM-INF:"):
            bandwidth = parse_bandwidth(line)
            if best_bandwidth is None or bandwidth > best_bandwidth:
                best_bandwidth = bandwidth
                # ...
        elif not line.startswith("#"):
            # ...
            best_url = line.strip()
    # ...
    raise Exception("Failed to find best stream for HLS: " + url)
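
# download_hls_segments walks the media playlist and appends each segment to
# the output file; an HTTPError is remembered in fail_if_not_last_segment,
# which suggests a failed fetch is only tolerated when it turns out to be the
# final segment.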
def download_hls_segments(outf, url):
    m3u8 = grab_text(url, 0)
    # ...
    fail_if_not_last_segment = None
    for line in m3u8.split("\n"):
        if not line.strip() or line.startswith("#"):
            # ...
        if fail_if_not_last_segment:
            # ...
        try:
            download_hls_fetch_segment(outf, line)
        except urllib2.HTTPError, e:
            fail_if_not_last_segment = e
            # ...
        sys.stdout.write(".")
    # ...
    sys.stdout.write("\n")

def download_hls_fetch_segment(outf, segment_url):
    # ...
    src = _urlopen(segment_url)
    # ...
    shutil.copyfileobj(src, outf)
    # ...
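
# download_hls ties the pieces together: it resolves the best variant from the
# master playlist, streams its segments into the output file, and remuxes the
# result to MP4. hack_url_func lets callers rewrite playlist and segment URLs
# and defaults to the identity function.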
def download_hls(filename, m3u8_master_url, hack_url_func=None):
    if hack_url_func is None:
        hack_url_func = lambda url: url
    # ...
    tmpdir = tempfile.mkdtemp(prefix="webdl-hls")
    # ...
    print "Downloading: %s" % filename
    try:
        # ...
        best_stream_url = download_hls_get_stream(hack_url_func(m3u8_master_url))
        ts_file = open(filename, "w")
        download_hls_segments(ts_file, hack_url_func(best_stream_url))
    except KeyboardInterrupt:
        print "\nCancelled", m3u8_master_url
        # ...
    # ...
    shutil.rmtree(tmpdir)
    # ...
    return convert_to_mp4(filename)
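
# natural_sort orders strings the way a human would: the (elided) key function
# appears to split each key on digit runs so that numbers compare numerically,
# and ignores leading articles such as "a" and "the".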
def natural_sort(l, key=None):
    ignore_list = ["a", "the"]
    # ...
        for c in re.split("([0-9]+)", k):
            # ...
                for subc in c.split():
                    if subc not in ignore_list:
                        # ...
    return sorted(l, key=key_func)
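
# append_to_qs merges extra parameters into a URL's query string: the URL is
# split, the existing query parsed, the new params folded in (that loop is
# elided), and everything re-encoded; urlencode(..., True) handles the list
# values produced by parse_qs. A hypothetical call, assuming the elided tail
# returns the rebuilt URL:
#   append_to_qs("http://example.com/api?x=1", {"y": "2"})
#   # -> "http://example.com/api?x=1&y=2"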
def append_to_qs(url, params):
    r = list(urlparse.urlsplit(url))
    qs = urlparse.parse_qs(r[3])
    for k, v in params.iteritems():
        # ...
    r[3] = urllib.urlencode(qs, True)
    url = urlparse.urlunsplit(r)
    # ...