code.delx.au - webdl/blob - common.py
# vim:ts=4:sts=4:sw=4:noet

from lxml import etree, html
import cookielib
import hashlib
import json
import os
import re
import shutil
import signal
import subprocess
import sys
import time
import urllib
import urllib2
import urlparse

import autosocks
autosocks.try_autosocks()

CACHE_DIR = os.path.expanduser("~/.cache/webdl")
USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1"

class Node(object):
	def __init__(self, title, parent=None):
		self.title = title
		if parent:
			parent.children.append(self)
		self.parent = parent
		self.children = []
		self.can_download = False
		self.sort_children = False

	def get_children(self):
		if self.sort_children:
			return natural_sort(self.children, key=lambda node: node.title)
		return self.children

	def fill_children(self):
		# Subclasses populate self.children here, on demand
		pass
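
# Illustrative only (assumed shape, not in the original file): the provider
# modules below are expected to subclass Node, overriding fill_children() to
# fetch listings lazily. A minimal sketch:
#
#	class ExampleCategoryNode(Node):
#		def fill_children(self):
#			Node("Example Episode", parent=self)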

def load_root_node():
	# Build the menu tree by asking each provider module to fill in its nodes
	root_node = Node("Root")

	import iview
	iview.fill_nodes(root_node)

	import sbs
	sbs.fill_nodes(root_node)

	import plus7
	plus7.fill_nodes(root_node)

	return root_node

valid_chars = frozenset("-_.()!@#%^ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def sanify_filename(filename):
	# Strip non-ASCII characters, then anything unsafe in a filename
	filename = filename.encode("ascii", "ignore")
	filename = "".join(c for c in filename if c in valid_chars)
	return filename
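
# For example (illustrative): sanify_filename(u"Caf\xe9: 1/2") returns
# "Caf 12" -- the accented character is dropped by the ascii encode, and
# ":" and "/" are filtered out because they are not in valid_chars.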

# All requests share one cookie jar, so session cookies persist across calls
cookiejar = cookielib.CookieJar()
urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))

def _urlopen(url, referrer=None):
	req = urllib2.Request(url)
	req.add_header("User-Agent", USER_AGENT)
	if referrer:
		req.add_header("Referer", referrer)
	return urlopener.open(req)

def urlopen(url, max_age):
	if not os.path.isdir(CACHE_DIR):
		os.makedirs(CACHE_DIR)

	if max_age <= 0:
		return _urlopen(url)

	# Cache on disk, keyed by the MD5 of the URL
	filename = hashlib.md5(url).hexdigest()
	filename = os.path.join(CACHE_DIR, filename)
	if os.path.exists(filename):
		file_age = int(time.time()) - os.path.getmtime(filename)
		if file_age < max_age:
			return open(filename)

	src = _urlopen(url)
	dst = open(filename, "w")
	try:
		shutil.copyfileobj(src, dst)
	finally:
		src.close()
		dst.close()

	return open(filename)
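
# Illustrative cache behaviour: within max_age seconds the second call is
# served from ~/.cache/webdl without touching the network:
#	f1 = urlopen("http://example.com/feed.xml", 3600)  # fetches and caches
#	f2 = urlopen("http://example.com/feed.xml", 3600)  # reads the cache file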

def grab_html(url, max_age):
	f = urlopen(url, max_age)
	doc = html.parse(f, html.HTMLParser(encoding="utf-8", recover=True))
	f.close()
	return doc

def grab_xml(url, max_age):
	f = urlopen(url, max_age)
	doc = etree.parse(f, etree.XMLParser(encoding="utf-8", recover=True))
	f.close()
	return doc

def grab_json(url, max_age, skip_assignment=False):
	f = urlopen(url, max_age)
	if skip_assignment:
		# Response looks like "var foo = {...}"; parse everything after the "="
		text = f.read()
		pos = text.find("=")
		doc = json.loads(text[pos+1:])
	else:
		doc = json.load(f)
	f.close()
	return doc
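
# For example (illustrative): skip_assignment copes with JSONP-style feeds,
# so a body of 'var config = {"url": "rtmp://..."}' yields the object after
# the "=":
#	grab_json(some_url, 3600, skip_assignment=True)["url"]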

def exec_subprocess(cmd):
	success = False
	try:
		p = subprocess.Popen(cmd)
		ret = p.wait()
		if ret == 0:
			success = True
		else:
			print >>sys.stderr, cmd[0], "exited with error code:", ret
	except OSError, e:
		print >>sys.stderr, "Failed to run", cmd[0], e
	except KeyboardInterrupt:
		print "Cancelled", cmd
		try:
			p.terminate()
			p.wait()
		except KeyboardInterrupt:
			# A second Ctrl-C means stop waiting politely
			p.send_signal(signal.SIGKILL)
	return success

def convert_flv_mp4(orig_filename):
	basename = os.path.splitext(orig_filename)[0]
	flv_filename = basename + ".flv"
	mp4_filename = basename + ".mp4"
	if orig_filename != flv_filename:
		os.rename(orig_filename, flv_filename)
	print "Converting %s to mp4" % flv_filename
	# Remux without re-encoding (command reconstructed; the elided original
	# lines built an equivalent ffmpeg invocation)
	cmd = [
		"ffmpeg",
		"-i", flv_filename,
		"-acodec", "copy",
		"-vcodec", "copy",
		mp4_filename,
	]
	if not exec_subprocess(cmd):
		return
	try:
		flv_size = os.stat(flv_filename).st_size
		mp4_size = os.stat(mp4_filename).st_size
		# A copy remux should barely change the size; a large difference
		# suggests ffmpeg silently failed
		if abs(flv_size - mp4_size) < 0.05 * flv_size:
			os.unlink(flv_filename)
		else:
			print >>sys.stderr, "The size of", mp4_filename, "is suspicious, did ffmpeg fail?"
	except Exception, e:
		print "Conversion failed", e

def convert_filename(filename):
	if os.path.splitext(filename.lower())[1] in (".mp4", ".flv"):
		# Sniff the magic bytes; real FLV data starts with "FLV\x01"
		f = open(filename)
		fourcc = f.read(4)
		f.close()
		if fourcc == "FLV\x01":
			convert_flv_mp4(filename)

def download_rtmp(filename, vbase, vpath, hash_url=None):
	filename = sanify_filename(filename)
	print "Downloading: %s" % filename
	if vpath.endswith(".flv"):
		vpath = vpath[:-4]
	# rtmpdump invocation reconstructed from the surrounding fragments:
	# -r is the server URL, -y the playpath, -o the output file
	cmd = [
		"rtmpdump",
		"-o", filename,
		"-r", vbase,
		"-y", vpath,
	]
	if hash_url is not None:
		cmd += ["--swfVfy", hash_url]
	if exec_subprocess(cmd):
		convert_filename(filename)
		return True
	return False
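
# Illustrative call with hypothetical values: the stream is addressed as an
# RTMP server (vbase) plus a playpath (vpath), with optional SWF verification:
#	download_rtmp("Example Episode.flv",
#		"rtmp://example.com/ondemand", "mp4:tv/episode1",
#		hash_url="http://example.com/player.swf")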

def download_urllib(filename, url, referrer=None):
	filename = sanify_filename(filename)
	print "Downloading: %s" % filename
	try:
		src = _urlopen(url, referrer)
		dst = open(filename, "w")
		while True:
			buf = src.read(1024*1024)
			if not buf:
				break
			dst.write(buf)
			# Progress indicator: one dot per megabyte
			sys.stdout.write(".")
			sys.stdout.flush()
		print
		src.close()
		dst.close()
		convert_filename(filename)
		return True
	except KeyboardInterrupt:
		print "\nCancelled", url
	return False

def natural_sort(l, key=None):
	ignore_list = ["a", "the"]
	def key_func(k):
		if key is not None:
			k = key(k)
		k = k.lower()
		newk = []
		for c in re.split("([0-9]+)", k):
			c = c.strip()
			if c.isdigit():
				# Compare digit runs numerically, not lexically
				newk.append(int(c))
			else:
				for subc in c.split():
					if subc not in ignore_list:
						newk.append(subc)
		return newk

	return sorted(l, key=key_func)
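
# For example (illustrative): digit runs sort numerically and leading
# articles are ignored, so
#	natural_sort(["The Show 10", "Show 2", "A Show 1"])
# returns ["A Show 1", "Show 2", "The Show 10"].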

def append_to_qs(url, params):
	r = list(urlparse.urlsplit(url))
	qs = urlparse.parse_qs(r[3])
	for k, v in params.iteritems():
		qs[k] = v
	# doseq=True so list values become repeated parameters
	r[3] = urllib.urlencode(qs, True)
	url = urlparse.urlunsplit(r)
	return url