| | | |
|---|---|---|
| author | Richard Purdie <rpurdie@linux.intel.com> | 2010-09-30 21:35:20 +0100 |
| committer | Richard Purdie <rpurdie@linux.intel.com> | 2010-09-30 22:16:10 +0100 |
| commit | c09cae578e5568c0ac975124db31f9cac05d50e9 (patch) | |
| tree | 1183a51498c1d2c7874ea0d3741c4f70dbfc66ef | /meta-extras/packages/python |
| parent | a51df11c1596746c85b015562ed67f37382b88b5 (diff) | |
| download | poky-c09cae578e5568c0ac975124db31f9cac05d50e9.tar.gz | |
Move prism-firmware, spectrum-fw, python-urlgrabber, python-iniparse and yum-metadata to meta-extras
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
Diffstat (limited to 'meta-extras/packages/python')
9 files changed, 244 insertions, 0 deletions
diff --git a/meta-extras/packages/python/python-iniparse-native_0.3.2.bb b/meta-extras/packages/python/python-iniparse-native_0.3.2.bb
new file mode 100644
index 0000000000..41aa13d97a
--- /dev/null
+++ b/meta-extras/packages/python/python-iniparse-native_0.3.2.bb
@@ -0,0 +1,6 @@
+require python-iniparse_${PV}.bb
+inherit native
+DEPENDS = "python-native"
+RDEPENDS = ""
+PR = "r0"
+
diff --git a/meta-extras/packages/python/python-iniparse_0.3.2.bb b/meta-extras/packages/python/python-iniparse_0.3.2.bb
new file mode 100644
index 0000000000..27a38f4249
--- /dev/null
+++ b/meta-extras/packages/python/python-iniparse_0.3.2.bb
@@ -0,0 +1,11 @@
+DESCRIPTION = "iniparse is an INI parser for Python"
+HOMEPAGE = "http://code.google.com/p/iniparse/"
+SECTION = "devel/python"
+PRIORITY = "optional"
+LICENSE = "GPL"
+PR = "r0"
+
+SRC_URI = "http://iniparse.googlecode.com/files/iniparse-${PV}.tar.gz"
+S = "${WORKDIR}/iniparse-${PV}"
+
+inherit distutils
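Worth noting why yum's stack wants iniparse rather than the stdlib ConfigParser: it round-trips INI files while preserving comments, ordering, and formatting. A minimal usage sketch under that assumption (hedged: the file name and the `[repo]`/`enabled` names below are invented for illustration; the attribute-style `INIConfig` API is the one documented for this 0.3.x, Python 2-era release):

```python
# Hypothetical example.ini containing a [repo] section with 'enabled'.
from iniparse import INIConfig

cfg = INIConfig(open('example.ini'))

# Attribute-style access: [repo] enabled = ...
print(cfg.repo.enabled)

# Edits round-trip without clobbering comments or ordering,
# which is the point of iniparse over plain ConfigParser.
cfg.repo.enabled = '0'
f = open('example.ini', 'w')
f.write(str(cfg))
f.close()
```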
diff --git a/meta-extras/packages/python/python-urlgrabber-native_3.9.1.bb b/meta-extras/packages/python/python-urlgrabber-native_3.9.1.bb
new file mode 100644
index 0000000000..ff8f02ecd7
--- /dev/null
+++ b/meta-extras/packages/python/python-urlgrabber-native_3.9.1.bb
@@ -0,0 +1,4 @@
+require python-urlgrabber_${PV}.bb
+inherit native
+DEPENDS = "python-native python-pycurl-native"
+RDEPENDS = ""
diff --git a/meta-extras/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch b/meta-extras/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch
new file mode 100644
index 0000000000..7a1ee059d1
--- /dev/null
+++ b/meta-extras/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch
@@ -0,0 +1,28 @@
+diff -up urlgrabber-3.0.0/urlgrabber/grabber.py.cleanup urlgrabber-3.0.0/urlgrabber/grabber.py
+--- urlgrabber-3.0.0/urlgrabber/grabber.py.cleanup	2007-11-29 10:25:13.000000000 +0000
++++ urlgrabber-3.0.0/urlgrabber/grabber.py	2007-11-29 10:26:15.000000000 +0000
+@@ -1204,16 +1204,18 @@ class URLGrabberFileObject:
+         bs = 1024*8
+         size = 0
+ 
+-        if amount is not None: bs = min(bs, amount - size)
+-        block = self.read(bs)
+-        size = size + len(block)
+-        while block:
+-            new_fo.write(block)
++        try:
+             if amount is not None: bs = min(bs, amount - size)
+             block = self.read(bs)
+             size = size + len(block)
++            while block:
++                new_fo.write(block)
++                if amount is not None: bs = min(bs, amount - size)
++                block = self.read(bs)
++                size = size + len(block)
++        finally:
++            new_fo.close()
+ 
+-        new_fo.close()
+         try:
+             modified_tuple = self.hdr.getdate_tz('last-modified')
+             modified_stamp = rfc822.mktime_tz(modified_tuple)
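The cleanup patch above only changes the error path: in the original loop an exception from `self.read()` would propagate before `new_fo.close()` ran, leaking the half-written destination file object. Reduced to a standalone sketch (hedged: `copy_stream`, `src`, and `dst` are invented stand-ins for the grabber's `self.read()` and `new_fo`, not real urlgrabber names):

```python
# The try/finally shape introduced by urlgrabber-3.0.0-cleanup.patch,
# stripped of grabber internals.
def copy_stream(src, dst, amount=None, bs=1024 * 8):
    size = 0
    try:
        if amount is not None:
            bs = min(bs, amount - size)
        block = src.read(bs)
        size = size + len(block)
        while block:
            dst.write(block)
            if amount is not None:
                bs = min(bs, amount - size)
            block = src.read(bs)
            size = size + len(block)
    finally:
        # Runs even when src.read() raises mid-transfer, so the
        # destination file object is always closed.
        dst.close()
    return size
```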
diff --git a/meta-extras/packages/python/python-urlgrabber/urlgrabber-HEAD.patch b/meta-extras/packages/python/python-urlgrabber/urlgrabber-HEAD.patch
new file mode 100644
index 0000000000..90180d29a0
--- /dev/null
+++ b/meta-extras/packages/python/python-urlgrabber/urlgrabber-HEAD.patch
@@ -0,0 +1,142 @@
+diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
+index e090e90..a26880c 100644
+--- a/urlgrabber/grabber.py
++++ b/urlgrabber/grabber.py
+@@ -439,6 +439,12 @@ try:
+ except:
+     __version__ = '???'
+ 
++try:
++    # this part isn't going to do much - need to talk to gettext
++    from i18n import _
++except ImportError, msg:
++    def _(st): return st
++
+ ########################################################################
+ # functions for debugging output. These functions are here because they
+ # are also part of the module initialization.
+@@ -1052,7 +1058,8 @@ class PyCurlFileObject():
+         self._reget_length = 0
+         self._prog_running = False
+         self._error = (None, None)
+-        self.size = None
++        self.size = 0
++        self._hdr_ended = False
+         self._do_open()
+ 
+ 
+@@ -1085,9 +1092,14 @@ class PyCurlFileObject():
+             return -1
+ 
+     def _hdr_retrieve(self, buf):
++        if self._hdr_ended:
++            self._hdr_dump = ''
++            self.size = 0
++            self._hdr_ended = False
++
+         if self._over_max_size(cur=len(self._hdr_dump),
+                                max_size=self.opts.max_header_size):
+-            return -1
++            return -1
+         try:
+             self._hdr_dump += buf
+             # we have to get the size before we do the progress obj start
+@@ -1104,7 +1116,17 @@ class PyCurlFileObject():
+                     s = parse150(buf)
+                 if s:
+                     self.size = int(s)
+-
++
++            if buf.lower().find('location') != -1:
++                location = ':'.join(buf.split(':')[1:])
++                location = location.strip()
++                self.scheme = urlparse.urlsplit(location)[0]
++                self.url = location
++
++            if len(self._hdr_dump) != 0 and buf == '\r\n':
++                self._hdr_ended = True
++                if DEBUG: DEBUG.info('header ended:')
++
+             return len(buf)
+         except KeyboardInterrupt:
+             return pycurl.READFUNC_ABORT
+@@ -1136,6 +1158,7 @@ class PyCurlFileObject():
+         self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update)
+         self.curl_obj.setopt(pycurl.FAILONERROR, True)
+         self.curl_obj.setopt(pycurl.OPT_FILETIME, True)
++        self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
+ 
+         if DEBUG:
+             self.curl_obj.setopt(pycurl.VERBOSE, True)
+@@ -1291,7 +1314,12 @@ class PyCurlFileObject():
+                     raise err
+ 
+                 elif str(e.args[1]) == '' and self.http_code != 0: # fake it until you make it
+-                    msg = 'HTTP Error %s : %s ' % (self.http_code, self.url)
++                    if self.scheme in ['http', 'https']:
++                        msg = 'HTTP Error %s : %s ' % (self.http_code, self.url)
++                    elif self.scheme in ['ftp']:
++                        msg = 'FTP Error %s : %s ' % (self.http_code, self.url)
++                    else:
++                        msg = "Unknown Error: URL=%s , scheme=%s" % (self.url, self.scheme)
+                 else:
+                     msg = 'PYCURL ERROR %s - "%s"' % (errcode, str(e.args[1]))
+                     code = errcode
+@@ -1299,6 +1327,12 @@ class PyCurlFileObject():
+                 err.code = code
+                 err.exception = e
+                 raise err
++            else:
++                if self._error[1]:
++                    msg = self._error[1]
++                    err = URLGrabError(14, msg)
++                    err.url = self.url
++                    raise err
+ 
+     def _do_open(self):
+         self.curl_obj = _curl_cache
+@@ -1532,11 +1566,14 @@ class PyCurlFileObject():
+     def _over_max_size(self, cur, max_size=None):
+ 
+         if not max_size:
+-            max_size = self.size
+-            if self.opts.size: # if we set an opts size use that, no matter what
+-                max_size = self.opts.size
++            if not self.opts.size:
++                max_size = self.size
++            else:
++                max_size = self.opts.size
++
+         if not max_size: return False # if we have None for all of the Max then this is dumb
+-        if cur > max_size + max_size*.10:
++
++        if cur > int(float(max_size) * 1.10):
+ 
+             msg = _("Downloaded more than max size for %s: %s > %s") \
+                         % (self.url, cur, max_size)
+@@ -1582,7 +1619,11 @@ class PyCurlFileObject():
+             self.opts.progress_obj.end(self._amount_read)
+         self.fo.close()
+ 
+-
++    def geturl(self):
++        """ Provide the geturl() method, used to be got from
++            urllib.addinfourl, via. urllib.URLopener.* """
++        return self.url
++
+ _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
+ 
+ 
+diff --git a/urlgrabber/progress.py b/urlgrabber/progress.py
+index dd07c6a..45eb248 100644
+--- a/urlgrabber/progress.py
++++ b/urlgrabber/progress.py
+@@ -658,6 +658,8 @@ def format_time(seconds, use_hours=0):
+     if seconds is None or seconds < 0:
+         if use_hours: return '--:--:--'
+         else: return '--:--'
++    elif seconds == float('inf'):
++        return 'Infinite'
+     else:
+         seconds = int(seconds)
+         minutes = seconds / 60
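Most of urlgrabber-HEAD.patch exists to support one libcurl option, `pycurl.FOLLOWLOCATION`: once redirects are followed inside a single transfer, the header callback sees a separate header block for every hop, so `_hdr_retrieve` must notice the blank `\r\n` line that ends one response and reset its accumulated state before the next begins. A reduced sketch of that state machine (hedged: `HeaderTracker` and its method names are invented; the real logic lives in `PyCurlFileObject._hdr_retrieve` above):

```python
# Per-response header accumulation in the style of the patched
# _hdr_retrieve; returning len(buf) tells libcurl the line was consumed.
class HeaderTracker(object):
    def __init__(self):
        self.hdr_dump = ''
        self.size = 0
        self.hdr_ended = False

    def on_header_line(self, buf):
        if self.hdr_ended:
            # A new response (e.g. after a 302) is starting:
            # throw away the previous hop's headers and size.
            self.hdr_dump = ''
            self.size = 0
            self.hdr_ended = False

        self.hdr_dump += buf
        if buf.lower().startswith('content-length:'):
            self.size = int(buf.split(':')[1])
        if len(self.hdr_dump) != 0 and buf == '\r\n':
            # The blank line terminates this response's header block.
            self.hdr_ended = True
        return len(buf)
```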
diff --git a/meta-extras/packages/python/python-urlgrabber/urlgrabber-reset.patch b/meta-extras/packages/python/python-urlgrabber/urlgrabber-reset.patch
new file mode 100644
index 0000000000..b63e7c33ac
--- /dev/null
+++ b/meta-extras/packages/python/python-urlgrabber/urlgrabber-reset.patch
@@ -0,0 +1,15 @@
+--- a/urlgrabber/grabber.py	2010-02-19 14:50:45.000000000 -0500
++++ b/urlgrabber/grabber.py	2010-02-19 14:51:28.000000000 -0500
+@@ -1626,6 +1626,12 @@
+ 
+ _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
+ 
++def reset_curl_obj():
++    """To make sure curl has reread the network/dns info we force a reload"""
++    global _curl_cache
++    _curl_cache.close()
++    _curl_cache = pycurl.Curl()
++
+ 
+ #####################################################################
+ # DEPRECATED FUNCTIONS
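urlgrabber keeps one module-level `pycurl.Curl()` handle and reuses it for every transfer, so cached DNS and connection state persists across downloads; `reset_curl_obj()` (added by the patch above) lets a long-running caller discard that handle. A hedged usage sketch, Python 2 era like the library itself; `fetch_with_reset` is an invented caller, not code from yum, though yum is the intended consumer:

```python
# Retry a failed download once with a freshly created curl handle,
# useful after the host's network or resolver configuration changed.
import urlgrabber
import urlgrabber.grabber

def fetch_with_reset(url, filename):
    try:
        return urlgrabber.urlgrab(url, filename)
    except urlgrabber.grabber.URLGrabError:
        # Drop the cached handle and its DNS/connection state,
        # then try again with a brand-new pycurl.Curl().
        urlgrabber.grabber.reset_curl_obj()
        return urlgrabber.urlgrab(url, filename)
```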
diff --git a/meta-extras/packages/python/python-urlgrabber_3.9.1.bb b/meta-extras/packages/python/python-urlgrabber_3.9.1.bb
new file mode 100644
index 0000000000..9f3a4cc6a7
--- /dev/null
+++ b/meta-extras/packages/python/python-urlgrabber_3.9.1.bb
@@ -0,0 +1,16 @@
+DESCRIPTION = "urlgrabber is a pure python package that drastically simplifies the fetching of files."
+
+HOMEPAGE = "http://urlgrabber.baseurl.org/"
+SECTION = "devel/python"
+PRIORITY = "optional"
+LICENSE = "GPL"
+PR = "r1"
+
+SRC_URI = "http://urlgrabber.baseurl.org/download/urlgrabber-${PV}.tar.gz \
+           file://urlgrabber-HEAD.patch;patch=1 \
+           file://urlgrabber-reset.patch;patch=1"
+S = "${WORKDIR}/urlgrabber-${PV}"
+
+DEPENDS = "python-pycurl"
+
+inherit distutils
\ No newline at end of file
diff --git a/meta-extras/packages/python/yum-metadata-parser-native_1.1.4.bb b/meta-extras/packages/python/yum-metadata-parser-native_1.1.4.bb
new file mode 100644
index 0000000000..0a44b99e2c
--- /dev/null
+++ b/meta-extras/packages/python/yum-metadata-parser-native_1.1.4.bb
@@ -0,0 +1,7 @@
+require yum-metadata-parser_${PV}.bb
+inherit native
+DEPENDS = "python-native sqlite3-native glib-2.0-native libxml2-native"
+RDEPENDS = ""
+PR = "r0"
+
+#BUILD_CFLAGS += "-I${STAGING_LIBDIR}/glib-2.0"
diff --git a/meta-extras/packages/python/yum-metadata-parser_1.1.4.bb b/meta-extras/packages/python/yum-metadata-parser_1.1.4.bb
new file mode 100644
index 0000000000..2e404a2bb1
--- /dev/null
+++ b/meta-extras/packages/python/yum-metadata-parser_1.1.4.bb
@@ -0,0 +1,15 @@
+DESCRIPTION = "C-based metadata parser to quickly parse xml metadata into sqlite databases."
+HOMEPAGE = "http://linux.duke.edu/projects/yum/download.ptml"
+SECTION = "devel/python"
+PRIORITY = "optional"
+DEPENDS = "python sqlite3 glib-2.0 libxml2"
+LICENSE = "GPL"
+
+PR = "r1"
+
+SRC_URI = "http://linux.duke.edu/projects/yum/download/yum-metadata-parser/yum-metadata-parser-${PV}.tar.gz"
+S = "${WORKDIR}/yum-metadata-parser-${PV}"
+
+TARGET_CFLAGS += "-I${STAGING_LIBDIR}/glib-2.0"
+
+inherit distutils
