code.delx.au - webdl/blob - common.py
# vim:ts=4:sts=4:sw=4:noet

from lxml import etree, html
import cookielib
import hashlib
import json
import os
import re
import shutil
import signal
import subprocess
import sys
import time
import urllib
import urllib2
import urlparse

import autosocks
autosocks.try_autosocks()

CACHE_DIR = os.path.expanduser("~/.cache/webdl")
USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1"
class Node(object):
    def __init__(self, title, parent=None):
        self.title = title
        if parent:
            parent.children.append(self)
        self.parent = parent
        self.children = []
        self.can_download = False

    def get_children(self):
        if not self.children:
            self.fill_children()
        return self.children

    def fill_children(self):
        pass
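# Each site module (iview, sbs, plus7) attaches its own subtree under a
# single root node.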
def load_root_node():  # wrapper name assumed; the calls below build the menu tree
    root_node = Node("Root")

    import iview
    iview.fill_nodes(root_node)

    import sbs
    sbs.fill_nodes(root_node)

    import plus7
    plus7.fill_nodes(root_node)

    return root_node
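# Filenames are reduced to a conservative ASCII character set before writing.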
valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def sanify_filename(filename):
    filename = filename.encode("ascii", "ignore")
    filename = "".join(c for c in filename if c in valid_chars)
    return filename
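# All HTTP requests share one cookie jar and send a browser-like User-Agent.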
cookiejar = cookielib.CookieJar()
urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
def _urlopen(url, referrer=None):
    req = urllib2.Request(url)
    req.add_header("User-Agent", USER_AGENT)
    if referrer:
        req.add_header("Referer", referrer)
    return urlopener.open(req)
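# Cached fetch: responses are stored on disk keyed by the MD5 of the URL and
# reused while they are younger than max_age seconds.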
def urlopen(url, max_age):
    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)

    filename = hashlib.md5(url).hexdigest()
    filename = os.path.join(CACHE_DIR, filename)
    if os.path.exists(filename):
        file_age = int(time.time()) - os.path.getmtime(filename)
        if file_age < max_age:
            return open(filename)

    src = _urlopen(url)
    dst = open(filename, "w")
    shutil.copyfileobj(src, dst)
    src.close()
    dst.close()
    return open(filename)
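# Fetch-and-parse helpers; each goes through the caching urlopen() above.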
def grab_html(url, max_age):
    f = urlopen(url, max_age)
    doc = html.parse(f, html.HTMLParser(encoding="utf-8", recover=True))
    f.close()
    return doc
def grab_xml(url, max_age):
    f = urlopen(url, max_age)
    doc = etree.parse(f, etree.XMLParser(encoding="utf-8", recover=True))
    f.close()
    return doc
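# skip_assignment strips a leading "name =" prefix and skip_function unwraps a
# JSONP-style "callback(...)" wrapper before the JSON body is parsed.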
def grab_json(url, max_age, skip_assignment=False, skip_function=False):
    f = urlopen(url, max_age)
    if skip_assignment:
        text = f.read()
        pos = text.find("=")
        doc = json.loads(text[pos+1:])
    elif skip_function:
        text = f.read()
        pos = text.find("(")
        rpos = text.rfind(")")
        doc = json.loads(text[pos+1:rpos])
    else:
        doc = json.load(f)
    f.close()
    return doc
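# Run an external command, report failures on stderr and handle Ctrl-C by
# terminating (or force-killing) the child process.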
def exec_subprocess(cmd):
    try:
        p = subprocess.Popen(cmd)
        ret = p.wait()
        if ret != 0:
            print >>sys.stderr, cmd[0], "exited with error code:", ret
            return False
        return True
    except OSError, e:
        print >>sys.stderr, "Failed to run", cmd[0], e
    except KeyboardInterrupt:
        print "Cancelled", cmd
        try:
            p.terminate()
            p.wait()
        except KeyboardInterrupt:
            p.send_signal(signal.SIGKILL)
    return False
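# Remux a downloaded FLV into an MP4 container; the FLV is only deleted when
# the MP4 size is within 5% of the original.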
def convert_flv_mp4(orig_filename):
    basename = os.path.splitext(orig_filename)[0]
    flv_filename = basename + ".flv"
    mp4_filename = basename + ".mp4"
    if orig_filename != flv_filename:
        os.rename(orig_filename, flv_filename)
    print "Converting %s to mp4" % flv_filename
    # exact ffmpeg arguments assumed: copy both streams into an mp4 container
    cmd = [
        "ffmpeg",
        "-i", flv_filename,
        "-acodec", "copy",
        "-vcodec", "copy",
        mp4_filename,
    ]
    if not exec_subprocess(cmd):
        return
    try:
        flv_size = os.stat(flv_filename).st_size
        mp4_size = os.stat(mp4_filename).st_size
        if abs(flv_size - mp4_size) < 0.05 * flv_size:
            os.unlink(flv_filename)
        else:
            print >>sys.stderr, "The size of", mp4_filename, "is suspicious, did ffmpeg fail?"
    except Exception, e:
        print "Conversion failed", e
def convert_filename(filename):
    if os.path.splitext(filename.lower())[1] in (".mp4", ".flv"):
        f = open(filename)
        fourcc = f.read(4)
        f.close()
        if fourcc == "FLV\x01":
            convert_flv_mp4(filename)
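# Download an RTMP stream with rtmpdump, optionally passing a SWF URL for
# verification, then convert the result if it turned out to be FLV.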
def download_rtmp(filename, vbase, vpath, hash_url=None):
    filename = sanify_filename(filename)
    print "Downloading: %s" % filename
    if vpath.endswith(".flv"):
        vpath = vpath[:-4]
    # exact rtmpdump arguments assumed: output file, RTMP base URL, playpath
    cmd = [
        "rtmpdump",
        "-o", filename,
        "-r", vbase,
        "-y", vpath,
    ]
    if hash_url is not None:
        cmd += ["--swfVfy", hash_url]
    if exec_subprocess(cmd):
        convert_filename(filename)
        return True
    return False
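# Plain HTTP download in 1 MB chunks, printing a dot per chunk as progress.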
def download_urllib(filename, url, referrer=None):
    filename = sanify_filename(filename)
    print "Downloading: %s" % filename
    try:
        src = _urlopen(url, referrer)
        dst = open(filename, "w")
        while True:
            buf = src.read(1024*1024)
            if not buf:
                break
            dst.write(buf)
            sys.stdout.write(".")
            sys.stdout.flush()
        print
        convert_filename(filename)
        return True
    except KeyboardInterrupt:
        print "\nCancelled", url
    return False
def natural_sort(l, key=None):
    ignore_list = ["a", "the"]
    def key_func(k):
        if key is not None:
            k = key(k)
        newk = []
        for c in re.split("([0-9]+)", k):
            c = c.strip().lower()
            if c.isdigit():
                newk.append(int(c))
            else:
                for subc in c.split():
                    if subc not in ignore_list:
                        newk.append(subc)
        return newk
    return sorted(l, key=key_func)
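# Merge extra query-string parameters into a URL and re-encode the result.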
def append_to_qs(url, params):
    r = list(urlparse.urlsplit(url))
    qs = urlparse.parse_qs(r[3])
    for k, v in params.iteritems():
        qs[k] = v
    r[3] = urllib.urlencode(qs, True)
    url = urlparse.urlunsplit(r)
    return url