# HG changeset patch
# User Matthew Wild
# Date 1263411576 0
# Node ID c4495fc7d00dd818af8eed546ef04960a6d319e3
# Parent 19c34f5fc8ebfce906c53c136ee2f9bb74e0c6ef
Add support for fetching archives from URLs

diff -r 19c34f5fc8eb -r c4495fc7d00d scripts/dtrx
--- a/scripts/dtrx	Sun Aug 30 11:55:49 2009 -0400
+++ b/scripts/dtrx	Wed Jan 13 19:39:36 2010 +0000
@@ -38,6 +38,7 @@
 import termios
 import textwrap
 import traceback
+import urllib
 
 try:
     set
@@ -1079,6 +1080,23 @@
                              (filename,))
         return error
 
+class UrlHandler(urllib.FancyURLopener):
+    def http_error_default(self, url, fp, errcode, errmsg, headers):
+        urllib.URLopener.http_error_default(self, url, fp, errcode, errmsg, headers)
+
+    def is_url(self, url):
+        if url.startswith("http://"):
+            return True
+
+    def fetch(self, url):
+        i = url.rfind('/')
+        filename = url[i+1:]
+        try:
+            self.retrieve(url, filename)
+        except IOError:
+            return False, "Failed to fetch "+url
+        return True, filename
+
 class ExtractorApplication(object):
 
     def __init__(self, arguments):
@@ -1229,6 +1247,7 @@
         return True
 
     def run(self):
+        urlhandler = UrlHandler()
         if self.options.show_list:
             action = ListAction
         else:
@@ -1238,6 +1257,8 @@
         self.current_directory, self.filenames = self.archives.popitem()
         os.chdir(self.current_directory)
         for filename in self.filenames:
+            if urlhandler.is_url(filename):
+                error, filename = urlhandler.fetch(filename)
             builder = ExtractorBuilder(filename, self.options)
             error = (self.check_file(filename) or
                      self.try_extractors(filename, builder.get_extractor()))