diff options
Diffstat (limited to 'bitbake-dev/lib')
41 files changed, 12277 insertions, 0 deletions
diff --git a/bitbake-dev/lib/bb/COW.py b/bitbake-dev/lib/bb/COW.py new file mode 100644 index 0000000000..e5063d60a8 --- /dev/null +++ b/bitbake-dev/lib/bb/COW.py | |||
| @@ -0,0 +1,320 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # This is a copy on write dictionary and set which abuses classes to try and be nice and fast. | ||
| 5 | # | ||
| 6 | # Copyright (C) 2006 Tim Amsell | ||
| 7 | # | ||
| 8 | # This program is free software; you can redistribute it and/or modify | ||
| 9 | # it under the terms of the GNU General Public License version 2 as | ||
| 10 | # published by the Free Software Foundation. | ||
| 11 | # | ||
| 12 | # This program is distributed in the hope that it will be useful, | ||
| 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | # GNU General Public License for more details. | ||
| 16 | # | ||
| 17 | # You should have received a copy of the GNU General Public License along | ||
| 18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 20 | # | ||
| 21 | #Please Note: | ||
| 22 | # Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. | ||
| 23 | # Assign a file to __warn__ to get warnings about slow operations. | ||
| 24 | # | ||
| 25 | |||
from inspect import getmro

import copy
import types, sets
# Monkey-patch the types module with an ImmutableTypes tuple.  It is used
# by COWDictMeta.__setitem__ to decide whether a stored value is safe to
# share between COW levels or needs copy-on-write (MUTABLE) tracking.
types.ImmutableTypes = tuple([ \
    types.BooleanType, \
    types.ComplexType, \
    types.FloatType, \
    types.IntType, \
    types.LongType, \
    types.NoneType, \
    types.TupleType, \
    sets.ImmutableSet] + \
    list(types.StringTypes))
| 40 | |||
| 41 | MUTABLE = "__mutable__" | ||
| 42 | |||
# Common base for the COW metaclasses below; also serves as the marker
# isinstance() checks use to recognise COW objects stored as values.
class COWMeta(type):
    pass
| 45 | |||
class COWDictMeta(COWMeta):
    """
    Metaclass implementing copy-on-write dictionary semantics.

    Keys are stored as class attributes, so "copying" is just deriving a
    new subclass (cow()): reads fall through to parent levels via normal
    attribute lookup, writes land on the child level only.  Mutable values
    are stored under key + MUTABLE and copied on first writable access.
    """
    __warn__ = False        # assign a file-like object to get slow-path warnings
    __hasmutable__ = False  # set once any non-immutable value has been stored
    __marker__ = tuple()    # sentinel stored to record a deleted key

    def __str__(cls):
        # FIXME: I have magic numbers!
        # The "- 3" presumably discounts the bookkeeping attributes each
        # level carries so only user keys are counted - confirm.
        return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
    __repr__ = __str__

    def cow(cls):
        # Derive a new COW level; the subclass inherits every key of cls.
        class C(cls):
            __count__ = cls.__count__ + 1
        return C
    copy = cow
    __call__ = cow

    def __setitem__(cls, key, value):
        # Mutable values get a MUTABLE-suffixed attribute name so reads
        # know a copy may be required before handing the value out.
        if not isinstance(value, types.ImmutableTypes):
            if not isinstance(value, COWMeta):
                cls.__hasmutable__ = True
            key += MUTABLE
        setattr(cls, key, value)

    def __getmutable__(cls, key, readonly=False):
        """Fetch a mutable value, copying it onto this level if needed."""
        nkey = key + MUTABLE
        try:
            # Already present on this level - no copy needed.
            return cls.__dict__[nkey]
        except KeyError:
            pass

        # Inherited from a parent level.
        value = getattr(cls, nkey)
        if readonly:
            return value

        if not cls.__warn__ is False and not isinstance(value, COWMeta):
            print >> cls.__warn__, "Warning: Doing a copy because %s is a mutable type." % key
        try:
            value = value.copy()
        except AttributeError, e:
            value = copy.copy(value)
        # Pin the copy on this level so the parent's value stays untouched.
        setattr(cls, nkey, value)
        return value

    __getmarker__ = []
    def __getreadonly__(cls, key, default=__getmarker__):
        """\
        Get a value (even if mutable) which you promise not to change.
        """
        return cls.__getitem__(key, default, True)

    def __getitem__(cls, key, default=__getmarker__, readonly=False):
        try:
            try:
                # Immutable values live under the plain key ...
                value = getattr(cls, key)
            except AttributeError:
                # ... mutable ones under key + MUTABLE.
                value = cls.__getmutable__(key, readonly)

            # This is for values which have been deleted
            if value is cls.__marker__:
                raise AttributeError("key %s does not exist." % key)

            return value
        except AttributeError, e:
            if not default is cls.__getmarker__:
                return default

            raise KeyError(str(e))

    def __delitem__(cls, key):
        # Record the deletion by storing the marker; parent levels are
        # shadowed, not modified.
        cls.__setitem__(key, cls.__marker__)

    def __revertitem__(cls, key):
        # Drop this level's override so the parent level's value (if any)
        # becomes visible again.
        if not cls.__dict__.has_key(key):
            key += MUTABLE
        delattr(cls, key)

    def has_key(cls, key):
        value = cls.__getreadonly__(key, cls.__marker__)
        if value is cls.__marker__:
            return False
        return True

    def iter(cls, type, readonly=False):
        """Generator behind iterkeys/itervalues/iteritems."""
        # dir() sees inherited attributes, so all levels' keys show up.
        for key in dir(cls):
            if key.startswith("__"):
                continue

            if key.endswith(MUTABLE):
                key = key[:-len(MUTABLE)]

            if type == "keys":
                yield key

            try:
                if readonly:
                    value = cls.__getreadonly__(key)
                else:
                    value = cls[key]
            except KeyError:
                # Deleted key (marker stored) - skip it.
                continue

            if type == "values":
                yield value
            if type == "items":
                yield (key, value)
        raise StopIteration()

    def iterkeys(cls):
        return cls.iter("keys")
    def itervalues(cls, readonly=False):
        if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
            print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
        return cls.iter("values", readonly)
    def iteritems(cls, readonly=False):
        if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
            print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
        return cls.iter("items", readonly)
| 164 | |||
class COWSetMeta(COWDictMeta):
    """
    Metaclass implementing copy-on-write set semantics on top of
    COWDictMeta: members are stored as dict entries keyed by
    repr(hash(value)), so only value iteration makes sense.
    """
    def __str__(cls):
        # FIXME: I have magic numbers!
        return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) -3)
    __repr__ = __str__

    def cow(cls):
        # Derive a new COW level (see COWDictMeta.cow).
        class C(cls):
            __count__ = cls.__count__ + 1
        return C

    def add(cls, value):
        # Adding the same value twice is a no-op: both map to one key.
        COWDictMeta.__setitem__(cls, repr(hash(value)), value)

    def remove(cls, value):
        COWDictMeta.__delitem__(cls, repr(hash(value)))

    def __in__(cls, value):
        # BUGFIX: the unbound metaclass method must receive cls explicitly;
        # the original call omitted it and raised TypeError on every use.
        return COWDictMeta.has_key(cls, repr(hash(value)))

    def iterkeys(cls):
        raise TypeError("sets don't have keys")

    def iteritems(cls):
        raise TypeError("sets don't have 'items'")
| 190 | |||
| 191 | # These are the actual classes you use! | ||
# These are the actual classes you use!
# Instantiating / copying a COWDictBase yields a new metaclass-level
# "dict" (see COWDictMeta.__call__); __count__ tracks the level depth.
class COWDictBase(object):
    __metaclass__ = COWDictMeta
    __count__ = 0
| 195 | |||
# Public copy-on-write set; see COWSetMeta for the semantics.
class COWSetBase(object):
    __metaclass__ = COWSetMeta
    __count__ = 0
| 199 | |||
if __name__ == "__main__":
    # Self-test / demo: exercises copy levels, mutable-value copy-on-write,
    # COW sets, deletion markers and __revertitem__.
    import sys
    COWDictBase.__warn__ = sys.stderr
    a = COWDictBase()
    print "a", a

    a['a'] = 'a'
    a['b'] = 'b'
    a['dict'] = {}

    b = a.copy()
    print "b", b
    b['c'] = 'b'

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems():
        print x

    # Writes through 'b' must not disturb 'a'; the shared mutable dict is
    # copied onto b's level on first writable access.
    b['dict']['a'] = 'b'
    b['a'] = 'c'

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems():
        print x

    # Missing keys surface as KeyError, not AttributeError.
    try:
        b['dict2']
    except KeyError, e:
        print "Okay!"

    a['set'] = COWSetBase()
    a['set'].add("o1")
    a['set'].add("o1")
    a['set'].add("o2")

    print "a", a
    for x in a['set'].itervalues():
        print x
    print "--"
    print "b", b
    for x in b['set'].itervalues():
        print x

    b['set'].add('o3')

    print "a", a
    for x in a['set'].itervalues():
        print x
    print "--"
    print "b", b
    for x in b['set'].itervalues():
        print x

    # A plain (non-COW) set is treated as an ordinary mutable value.
    a['set2'] = set()
    a['set2'].add("o1")
    a['set2'].add("o1")
    a['set2'].add("o2")

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x

    # Deletion on a child level only shadows the parent's value.
    del b['b']
    try:
        print b['b']
    except KeyError:
        print "Yay! deleted key raises error"

    if b.has_key('b'):
        print "Boo!"
    else:
        print "Yay - has_key with delete works!"

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x

    # Reverting removes b's deletion marker; a's value shows through again.
    b.__revertitem__('b')

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x

    b.__revertitem__('dict')
    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x
diff --git a/bitbake-dev/lib/bb/__init__.py b/bitbake-dev/lib/bb/__init__.py new file mode 100644 index 0000000000..99995212c3 --- /dev/null +++ b/bitbake-dev/lib/bb/__init__.py | |||
| @@ -0,0 +1,1133 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # BitBake Build System Python Library | ||
| 5 | # | ||
| 6 | # Copyright (C) 2003 Holger Schurig | ||
| 7 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 8 | # | ||
| 9 | # Based on Gentoo's portage.py. | ||
| 10 | # | ||
| 11 | # This program is free software; you can redistribute it and/or modify | ||
| 12 | # it under the terms of the GNU General Public License version 2 as | ||
| 13 | # published by the Free Software Foundation. | ||
| 14 | # | ||
| 15 | # This program is distributed in the hope that it will be useful, | ||
| 16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | # GNU General Public License for more details. | ||
| 19 | # | ||
| 20 | # You should have received a copy of the GNU General Public License along | ||
| 21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 23 | |||
| 24 | __version__ = "1.9.0" | ||
| 25 | |||
| 26 | __all__ = [ | ||
| 27 | |||
| 28 | "debug", | ||
| 29 | "note", | ||
| 30 | "error", | ||
| 31 | "fatal", | ||
| 32 | |||
| 33 | "mkdirhier", | ||
| 34 | "movefile", | ||
| 35 | |||
| 36 | "tokenize", | ||
| 37 | "evaluate", | ||
| 38 | "flatten", | ||
| 39 | "relparse", | ||
| 40 | "ververify", | ||
| 41 | "isjustname", | ||
| 42 | "isspecific", | ||
| 43 | "pkgsplit", | ||
| 44 | "catpkgsplit", | ||
| 45 | "vercmp", | ||
| 46 | "pkgcmp", | ||
| 47 | "dep_parenreduce", | ||
| 48 | "dep_opconvert", | ||
| 49 | |||
| 50 | # fetch | ||
| 51 | "decodeurl", | ||
| 52 | "encodeurl", | ||
| 53 | |||
| 54 | # modules | ||
| 55 | "parse", | ||
| 56 | "data", | ||
| 57 | "command", | ||
| 58 | "event", | ||
| 59 | "build", | ||
| 60 | "fetch", | ||
| 61 | "manifest", | ||
| 62 | "methodpool", | ||
| 63 | "cache", | ||
| 64 | "runqueue", | ||
| 65 | "taskdata", | ||
| 66 | "providers", | ||
| 67 | ] | ||
| 68 | |||
# Character classes used by tokenize() below.
whitespace = '\t\n\x0b\x0c\r '
lowercase = 'abcdefghijklmnopqrstuvwxyz'

import sys, os, types, re, string, bb
from bb import msg

#projectdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
projectdir = os.getcwd()

# Honour the BBDEBUG environment variable at import time: a non-zero
# integer raises the global message debug level.
if "BBDEBUG" in os.environ:
    level = int(os.environ["BBDEBUG"])
    if level:
        bb.msg.set_debug_level(level)
| 82 | |||
class VarExpandError(Exception):
    """Raised on variable-expansion failure.

    NOTE(review): nothing in this chunk raises it - presumably used by
    the data/parse modules; confirm before relying on it.
    """
    pass

class MalformedUrl(Exception):
    """Exception raised when encountering an invalid url"""
| 88 | |||
| 89 | |||
| 90 | ####################################################################### | ||
| 91 | ####################################################################### | ||
| 92 | # | ||
| 93 | # SECTION: Debug | ||
| 94 | # | ||
| 95 | # PURPOSE: little functions to make yourself known | ||
| 96 | # | ||
| 97 | ####################################################################### | ||
| 98 | ####################################################################### | ||
| 99 | |||
def plain(*args):
    """Emit a plain message; all positional args are concatenated."""
    # NOTE(review): passes a single joined string, whereas warn() below
    # passes (1, None, msg) to the same bb.msg.warn - one of the two call
    # signatures is presumably wrong; confirm against bb.msg.warn.
    bb.msg.warn(''.join(args))

def debug(lvl, *args):
    """Emit a debug message at level lvl, with no message domain."""
    bb.msg.debug(lvl, None, ''.join(args))

def note(*args):
    """Emit a note (verbosity 1), with no message domain."""
    bb.msg.note(1, None, ''.join(args))

def warn(*args):
    """Emit a warning; see NOTE on plain() about the arity mismatch."""
    bb.msg.warn(1, None, ''.join(args))

def error(*args):
    """Emit an error message."""
    bb.msg.error(None, ''.join(args))

def fatal(*args):
    """Emit a fatal error; bb.msg.fatal is presumably expected not to
    return (callers such as encodeurl rely on that) - confirm."""
    bb.msg.fatal(None, ''.join(args))
| 117 | |||
| 118 | |||
| 119 | ####################################################################### | ||
| 120 | ####################################################################### | ||
| 121 | # | ||
| 122 | # SECTION: File | ||
| 123 | # | ||
| 124 | # PURPOSE: Basic file and directory tree related functions | ||
| 125 | # | ||
| 126 | ####################################################################### | ||
| 127 | ####################################################################### | ||
| 128 | |||
| 129 | def mkdirhier(dir): | ||
| 130 | """Create a directory like 'mkdir -p', but does not complain if | ||
| 131 | directory already exists like os.makedirs | ||
| 132 | """ | ||
| 133 | |||
| 134 | debug(3, "mkdirhier(%s)" % dir) | ||
| 135 | try: | ||
| 136 | os.makedirs(dir) | ||
| 137 | debug(2, "created " + dir) | ||
| 138 | except OSError, e: | ||
| 139 | if e.errno != 17: raise e | ||
| 140 | |||
| 141 | |||
| 142 | ####################################################################### | ||
| 143 | |||
| 144 | import stat | ||
| 145 | |||
| 146 | def movefile(src,dest,newmtime=None,sstat=None): | ||
| 147 | """Moves a file from src to dest, preserving all permissions and | ||
| 148 | attributes; mtime will be preserved even when moving across | ||
| 149 | filesystems. Returns true on success and false on failure. Move is | ||
| 150 | atomic. | ||
| 151 | """ | ||
| 152 | |||
| 153 | #print "movefile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" | ||
| 154 | try: | ||
| 155 | if not sstat: | ||
| 156 | sstat=os.lstat(src) | ||
| 157 | except Exception, e: | ||
| 158 | print "movefile: Stating source file failed...", e | ||
| 159 | return None | ||
| 160 | |||
| 161 | destexists=1 | ||
| 162 | try: | ||
| 163 | dstat=os.lstat(dest) | ||
| 164 | except: | ||
| 165 | dstat=os.lstat(os.path.dirname(dest)) | ||
| 166 | destexists=0 | ||
| 167 | |||
| 168 | if destexists: | ||
| 169 | if stat.S_ISLNK(dstat[stat.ST_MODE]): | ||
| 170 | try: | ||
| 171 | os.unlink(dest) | ||
| 172 | destexists=0 | ||
| 173 | except Exception, e: | ||
| 174 | pass | ||
| 175 | |||
| 176 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | ||
| 177 | try: | ||
| 178 | target=os.readlink(src) | ||
| 179 | if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): | ||
| 180 | os.unlink(dest) | ||
| 181 | os.symlink(target,dest) | ||
| 182 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
| 183 | os.unlink(src) | ||
| 184 | return os.lstat(dest) | ||
| 185 | except Exception, e: | ||
| 186 | print "movefile: failed to properly create symlink:", dest, "->", target, e | ||
| 187 | return None | ||
| 188 | |||
| 189 | renamefailed=1 | ||
| 190 | if sstat[stat.ST_DEV]==dstat[stat.ST_DEV]: | ||
| 191 | try: | ||
| 192 | ret=os.rename(src,dest) | ||
| 193 | renamefailed=0 | ||
| 194 | except Exception, e: | ||
| 195 | import errno | ||
| 196 | if e[0]!=errno.EXDEV: | ||
| 197 | # Some random error. | ||
| 198 | print "movefile: Failed to move", src, "to", dest, e | ||
| 199 | return None | ||
| 200 | # Invalid cross-device-link 'bind' mounted or actually Cross-Device | ||
| 201 | |||
| 202 | if renamefailed: | ||
| 203 | didcopy=0 | ||
| 204 | if stat.S_ISREG(sstat[stat.ST_MODE]): | ||
| 205 | try: # For safety copy then move it over. | ||
| 206 | shutil.copyfile(src,dest+"#new") | ||
| 207 | os.rename(dest+"#new",dest) | ||
| 208 | didcopy=1 | ||
| 209 | except Exception, e: | ||
| 210 | print 'movefile: copy', src, '->', dest, 'failed.', e | ||
| 211 | return None | ||
| 212 | else: | ||
| 213 | #we don't yet handle special, so we need to fall back to /bin/mv | ||
| 214 | a=getstatusoutput("/bin/mv -f "+"'"+src+"' '"+dest+"'") | ||
| 215 | if a[0]!=0: | ||
| 216 | print "movefile: Failed to move special file:" + src + "' to '" + dest + "'", a | ||
| 217 | return None # failure | ||
| 218 | try: | ||
| 219 | if didcopy: | ||
| 220 | missingos.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
| 221 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | ||
| 222 | os.unlink(src) | ||
| 223 | except Exception, e: | ||
| 224 | print "movefile: Failed to chown/chmod/unlink", dest, e | ||
| 225 | return None | ||
| 226 | |||
| 227 | if newmtime: | ||
| 228 | os.utime(dest,(newmtime,newmtime)) | ||
| 229 | else: | ||
| 230 | os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) | ||
| 231 | newmtime=sstat[stat.ST_MTIME] | ||
| 232 | return newmtime | ||
| 233 | |||
| 234 | def copyfile(src,dest,newmtime=None,sstat=None): | ||
| 235 | """ | ||
| 236 | Copies a file from src to dest, preserving all permissions and | ||
| 237 | attributes; mtime will be preserved even when moving across | ||
| 238 | filesystems. Returns true on success and false on failure. | ||
| 239 | """ | ||
| 240 | import os, stat, shutil | ||
| 241 | |||
| 242 | #print "copyfile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" | ||
| 243 | try: | ||
| 244 | if not sstat: | ||
| 245 | sstat=os.lstat(src) | ||
| 246 | except Exception, e: | ||
| 247 | print "copyfile: Stating source file failed...", e | ||
| 248 | return False | ||
| 249 | |||
| 250 | destexists=1 | ||
| 251 | try: | ||
| 252 | dstat=os.lstat(dest) | ||
| 253 | except: | ||
| 254 | dstat=os.lstat(os.path.dirname(dest)) | ||
| 255 | destexists=0 | ||
| 256 | |||
| 257 | if destexists: | ||
| 258 | if stat.S_ISLNK(dstat[stat.ST_MODE]): | ||
| 259 | try: | ||
| 260 | os.unlink(dest) | ||
| 261 | destexists=0 | ||
| 262 | except Exception, e: | ||
| 263 | pass | ||
| 264 | |||
| 265 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | ||
| 266 | try: | ||
| 267 | target=os.readlink(src) | ||
| 268 | if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): | ||
| 269 | os.unlink(dest) | ||
| 270 | os.symlink(target,dest) | ||
| 271 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
| 272 | return os.lstat(dest) | ||
| 273 | except Exception, e: | ||
| 274 | print "copyfile: failed to properly create symlink:", dest, "->", target, e | ||
| 275 | return False | ||
| 276 | |||
| 277 | if stat.S_ISREG(sstat[stat.ST_MODE]): | ||
| 278 | try: # For safety copy then move it over. | ||
| 279 | shutil.copyfile(src,dest+"#new") | ||
| 280 | os.rename(dest+"#new",dest) | ||
| 281 | except Exception, e: | ||
| 282 | print 'copyfile: copy', src, '->', dest, 'failed.', e | ||
| 283 | return False | ||
| 284 | else: | ||
| 285 | #we don't yet handle special, so we need to fall back to /bin/mv | ||
| 286 | a=getstatusoutput("/bin/cp -f "+"'"+src+"' '"+dest+"'") | ||
| 287 | if a[0]!=0: | ||
| 288 | print "copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a | ||
| 289 | return False # failure | ||
| 290 | try: | ||
| 291 | os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
| 292 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | ||
| 293 | except Exception, e: | ||
| 294 | print "copyfile: Failed to chown/chmod/unlink", dest, e | ||
| 295 | return False | ||
| 296 | |||
| 297 | if newmtime: | ||
| 298 | os.utime(dest,(newmtime,newmtime)) | ||
| 299 | else: | ||
| 300 | os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) | ||
| 301 | newmtime=sstat[stat.ST_MTIME] | ||
| 302 | return newmtime | ||
| 303 | |||
| 304 | ####################################################################### | ||
| 305 | ####################################################################### | ||
| 306 | # | ||
| 307 | # SECTION: Download | ||
| 308 | # | ||
| 309 | # PURPOSE: Download via HTTP, FTP, CVS, BITKEEPER, handling of MD5-signatures | ||
| 310 | # and mirrors | ||
| 311 | # | ||
| 312 | ####################################################################### | ||
| 313 | ####################################################################### | ||
| 314 | |||
def decodeurl(url):
    """Decodes an URL into the tokens (scheme, network location, path,
    user, password, parameters).

    >>> decodeurl("http://www.google.com/index.html")
    ('http', 'www.google.com', '/index.html', '', '', {})

    CVS url with username, host and cvsroot. The cvs module to check out is in the
    parameters:

    >>> decodeurl("cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg")
    ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'})

    Dito, but this time the username has a password part. And we also request a special tag
    to check out.

    >>> decodeurl("cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;module=familiar/dist/ipkg;tag=V0-99-81")
    ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'})

    Raises MalformedUrl if the url does not match scheme://location[;parm].
    """

    m = re.compile('(?P<type>[^:]*)://((?P<user>.+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
    if not m:
        raise MalformedUrl(url)

    type = m.group('type')
    location = m.group('location')
    if not location:
        raise MalformedUrl(url)
    user = m.group('user')
    parm = m.group('parm')

    # Split the location into host and path at the first '/'.
    locidx = location.find('/')
    if locidx != -1:
        host = location[:locidx]
        path = location[locidx:]
    else:
        host = ""
        path = location

    if user:
        m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
        if m:
            user = m.group('user')
            pswd = m.group('pswd')
        else:
            user = ''
            pswd = ''
    else:
        # BUGFIX: with no user part, 'user' is None and 'pswd' was never
        # assigned, so the return below raised NameError.  Normalise both
        # to the empty string, as the doctests above document.
        user = ''
        pswd = ''

    p = {}
    if parm:
        for s in parm.split(';'):
            s1,s2 = s.split('=')
            p[s1] = s2

    return (type, host, path, user, pswd, p)
| 369 | |||
| 370 | ####################################################################### | ||
| 371 | |||
def encodeurl(decoded):
    """Encodes a URL from tokens (scheme, network location, path,
    user, password, parameters).

    >>> encodeurl(['http', 'www.google.com', '/index.html', '', '', {}])
    'http://www.google.com/index.html'

    CVS with username, host and cvsroot. The cvs module to check out is in the
    parameters:

    >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}])
    'cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg'

    Dito, but this time the username has a password part. And we also request a special tag
    to check out.

    >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'}])
    'cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg'
    """

    (type, host, path, user, pswd, p) = decoded

    # Scheme and path are mandatory; everything else is optional.
    if not type or not path:
        fatal("invalid or missing parameters for url encoding")

    # Optional "user[:password]@" authority prefix.
    auth = ""
    if user:
        auth = "%s" % user
        if pswd:
            auth = auth + ":%s" % pswd
        auth = auth + "@"

    pieces = ['%s://' % type, auth, "%s" % (host or ""), "%s" % path]
    if p:
        for parm in p.keys():
            pieces.append(";%s=%s" % (parm, p[parm]))

    return "".join(pieces)
| 410 | |||
| 411 | ####################################################################### | ||
| 412 | |||
def which(path, item, direction = 0):
    """
    Locate a file called 'item' in the colon-separated 'path'.
    Searches front-to-back by default; any non-zero 'direction'
    searches back-to-front.  Returns "" if nothing is found.
    """

    paths = (path or "").split(':')
    if direction != 0:
        paths.reverse()

    # BUGFIX: the original loop re-split 'path' instead of iterating the
    # (possibly reversed) 'paths' list, so 'direction' had no effect.
    for p in paths:
        candidate = os.path.join(p, item)
        if os.path.exists(candidate):
            return candidate

    return ""
| 428 | |||
| 429 | ####################################################################### | ||
| 430 | |||
| 431 | |||
| 432 | |||
| 433 | |||
| 434 | ####################################################################### | ||
| 435 | ####################################################################### | ||
| 436 | # | ||
| 437 | # SECTION: Dependency | ||
| 438 | # | ||
| 439 | # PURPOSE: Compare build & run dependencies | ||
| 440 | # | ||
| 441 | ####################################################################### | ||
| 442 | ####################################################################### | ||
| 443 | |||
def tokenize(mystring):
    """Breaks a string like 'foo? (bar) oni? (blah (blah))' into (possibly embedded) lists:

    >>> tokenize("x")
    ['x']
    >>> tokenize("x y")
    ['x', 'y']
    >>> tokenize("(x y)")
    [['x', 'y']]
    >>> tokenize("(x y) b c")
    [['x', 'y'], 'b', 'c']
    >>> tokenize("foo? (bar) oni? (blah (blah))")
    ['foo?', ['bar'], 'oni?', ['blah', ['blah']]]
    >>> tokenize("sys-apps/linux-headers nls? (sys-devel/gettext)")
    ['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']]

    Returns None (after printing a message) on unbalanced parentheses.
    """

    newtokens = []
    curlist = newtokens   # list receiving tokens at the current depth
    prevlists = []        # stack of enclosing lists
    level = 0             # current parenthesis nesting depth
    accum = ""            # characters of the token being accumulated
    for x in mystring:
        if x=="(":
            # Flush the pending token, then descend one level.
            if accum:
                curlist.append(accum)
                accum=""
            prevlists.append(curlist)
            curlist=[]
            level=level+1
        elif x==")":
            if accum:
                curlist.append(accum)
                accum=""
            if level==0:
                # NOTE(review): message says "left parenthesis" but this
                # branch fires on an unmatched *right* parenthesis.
                print "!!! tokenizer: Unmatched left parenthesis in:\n'"+mystring+"'"
                return None
            # Attach the finished sublist to its parent and ascend.
            newlist=curlist
            curlist=prevlists.pop()
            curlist.append(newlist)
            level=level-1
        elif x in whitespace:
            # Whitespace terminates the pending token.
            if accum:
                curlist.append(accum)
                accum=""
        else:
            accum=accum+x
    if accum:
        curlist.append(accum)
    if (level!=0):
        print "!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+mystring+"'"
        return None
    return newtokens
| 497 | |||
| 498 | |||
| 499 | ####################################################################### | ||
| 500 | |||
def evaluate(tokens,mydefines,allon=0):
    """Removes tokens based on whether conditional definitions exist or not.
    Recognizes !

    >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {})
    ['sys-apps/linux-headers']

    Negate the flag:

    >>> evaluate(['sys-apps/linux-headers', '!nls?', ['sys-devel/gettext']], {})
    ['sys-apps/linux-headers', ['sys-devel/gettext']]

    Define 'nls':

    >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {"nls":1})
    ['sys-apps/linux-headers', ['sys-devel/gettext']]

    Turn allon on:

    >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {}, True)
    ['sys-apps/linux-headers', ['sys-devel/gettext']]
    """

    if tokens == None:
        return None
    mytokens = tokens + [] # this copies the list
    pos = 0
    while pos < len(mytokens):
        if type(mytokens[pos]) == types.ListType:
            # NOTE(review): evaluate() operates on a copy and its return
            # value is discarded here, so the recursion presumably has no
            # effect on the nested list; only the emptiness check of the
            # *original* sublist below does anything - confirm intent.
            evaluate(mytokens[pos], mydefines)
            if not len(mytokens[pos]):
                del mytokens[pos]
                continue
        elif mytokens[pos][-1] == "?":
            # Conditional token: strip the trailing '?'; after the del,
            # 'pos' points at the token the condition governs.
            cur = mytokens[pos][:-1]
            del mytokens[pos]
            if allon:
                if cur[0] == "!":
                    del mytokens[pos]
            else:
                if cur[0] == "!":
                    # Negated flag: drop the guarded token if defined.
                    if (cur[1:] in mydefines) and (pos < len(mytokens)):
                        del mytokens[pos]
                        continue
                elif (cur not in mydefines) and (pos < len(mytokens)):
                    # Plain flag: drop the guarded token if NOT defined.
                    del mytokens[pos]
                    continue
        pos = pos + 1
    return mytokens
| 550 | |||
| 551 | |||
| 552 | ####################################################################### | ||
| 553 | |||
def flatten(mytokens):
    """Converts nested arrays into a flat arrays:

    >>> flatten([1,[2,3]])
    [1, 2, 3]
    >>> flatten(['sys-apps/linux-headers', ['sys-devel/gettext']])
    ['sys-apps/linux-headers', 'sys-devel/gettext']
    """

    newlist=[]
    for x in mytokens:
        # isinstance() replaces the exact-type comparison against
        # types.ListType: same behaviour for plain lists, but it also
        # handles list subclasses and does not rely on py2-only names.
        if isinstance(x, list):
            newlist.extend(flatten(x))
        else:
            newlist.append(x)
    return newlist
| 570 | |||
| 571 | |||
| 572 | ####################################################################### | ||
| 573 | |||
# Relative ordering of pre/post-release suffixes ("p" == plain patch level 0);
# more negative means an earlier pre-release.
_package_weights_ = {"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1} # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list
| 576 | |||
def relparse(myver):
    """Parses the last elements of a version number into a triplet, that can
    later be compared:

    >>> relparse('1.2_pre3')
    [1.2, -2, 3.0]
    >>> relparse('1.2b')
    [1.2, 98, 0]
    >>> relparse('1.2')
    [1.2, 0, 0]

    The triplet is [numeric value, suffix weight, suffix revision]:
    a '_<suffix><n>' tail maps to its _package_weights_ entry and n,
    a single trailing letter maps to its ordinal value, otherwise both
    suffix fields stay 0.
    """

    number = 0
    p1 = 0
    p2 = 0
    mynewver = myver.split('_')
    if len(mynewver) == 2:
        # Candidate '_suffix' tail: try to match it against the known endings.
        number = float(mynewver[0])
        for x in _package_ends_:
            elen = len(x)
            if mynewver[1][:elen] == x:
                p1 = _package_weights_[x]
                try:
                    p2 = float(mynewver[1][elen:])
                except ValueError:
                    # no (or malformed) numeric tail after the suffix
                    p2 = 0
                return [number, p1, p2]
    # No recognised '_suffix' (or no '_' at all): a normal number,
    # possibly with a single letter at the end.  This single copy
    # replaces two byte-identical branches in the original.
    divider = len(myver) - 1
    if myver[divider:] not in "1234567890":
        # letter at end: weight it by its ordinal value
        p1 = ord(myver[divider:])
        number = float(myver[0:divider])
    else:
        number = float(myver)
    return [number, p1, p2]
| 626 | |||
| 627 | |||
| 628 | ####################################################################### | ||
| 629 | |||
| 630 | __ververify_cache__ = {} | ||
| 631 | |||
def ververify(myorigval,silent=1):
    """Returns 1 if given a valid version string, else 0. Valid versions are in the format

    <v1>.<v2>...<vx>[a-z,_{_package_weights_}[vy]]

    >>> ververify('2.4.20')
    1
    >>> ververify('2.4..20') # two dots
    0
    >>> ververify('2.x.20') # 'x' is not numeric
    0
    >>> ververify('2.4.20a')
    1
    >>> ververify('2.4.20cvs') # only one trailing letter
    0
    >>> ververify('1a')
    1
    >>> ververify('test_a') # no version at all
    0
    >>> ververify('2.4.20_beta1')
    1
    >>> ververify('2.4.20_beta')
    1
    >>> ververify('2.4.20_wrongext') # _wrongext is no valid trailer
    0
    """

    # Lookup the cache first
    try:
        return __ververify_cache__[myorigval]
    except KeyError:
        pass

    if len(myorigval) == 0:
        if not silent:
            error("package version is empty")
        __ververify_cache__[myorigval] = 0
        return 0
    myval = myorigval.split('.')
    if len(myval)==0:
        if not silent:
            error("package name has empty version string")
        __ververify_cache__[myorigval] = 0
        return 0
    # all but the last version must be a numeric
    for x in myval[:-1]:
        if not len(x):
            if not silent:
                error("package version has two points in a row")
            __ververify_cache__[myorigval] = 0
            return 0
        try:
            foo = int(x)
        except ValueError:
            if not silent:
                error("package version contains non-numeric '"+x+"'")
            __ververify_cache__[myorigval] = 0
            return 0
    if not len(myval[-1]):
        if not silent:
            error("package version has trailing dot")
        __ververify_cache__[myorigval] = 0
        return 0
    try:
        foo = int(myval[-1])
        __ververify_cache__[myorigval] = 1
        return 1
    except ValueError:
        pass

    # ok, our last component is not a plain number or blank, let's continue
    if myval[-1][-1] in lowercase:
        try:
            # 1a, 2.0b, etc.
            foo = int(myval[-1][:-1])
            # BUGFIX: the cache write used to sit *after* the return and
            # was therefore unreachable; populate the cache, then return.
            __ververify_cache__[myorigval] = 1
            return 1
        except ValueError:
            pass
    # ok, maybe we have a 1_alpha or 1_beta2; let's see
    ep=string.split(myval[-1],"_")
    if len(ep)!= 2:
        if not silent:
            error("package version has more than one letter at the end")
        __ververify_cache__[myorigval] = 0
        return 0
    try:
        foo = string.atoi(ep[0])
    except ValueError:
        # this needs to be numeric, i.e. the "1" in "1_alpha"
        if not silent:
            error("package version must have numeric part before the '_'")
        __ververify_cache__[myorigval] = 0
        return 0

    for mye in _package_ends_:
        if ep[1][0:len(mye)] == mye:
            if len(mye) == len(ep[1]):
                # no trailing numeric is ok
                __ververify_cache__[myorigval] = 1
                return 1
            else:
                try:
                    foo = string.atoi(ep[1][len(mye):])
                    __ververify_cache__[myorigval] = 1
                    return 1
                except ValueError:
                    # if no _package_weights_ work, *then* we return 0
                    pass
    if not silent:
        error("package version extension after '_' is invalid")
    __ververify_cache__[myorigval] = 0
    return 0
| 745 | |||
| 746 | |||
def isjustname(mypkg):
    """Return 1 when mypkg is a bare package name, 0 otherwise.

    A package atom counts as "just a name" when none of its
    '-'-separated pieces parses as a valid version string.
    """
    for part in string.split(mypkg, '-'):
        if ververify(part):
            return 0
    return 1
| 753 | |||
| 754 | |||
| 755 | _isspecific_cache_={} | ||
| 756 | |||
def isspecific(mypkg):
    """Return 1 if mypkg carries a version part (a "specific" atom), else 0.

    Now supports packages with no category prefix.
    """
    try:
        # BUGFIX: this function used __isspecific_cache__ (double
        # underscores), a name that is defined nowhere -- the module-level
        # cache is _isspecific_cache_.  The bare except hid the NameError
        # on lookup, but every cache *store* below then crashed.
        return _isspecific_cache_[mypkg]
    except KeyError:
        pass

    mysplit = string.split(mypkg, "/")
    if not isjustname(mysplit[-1]):
        _isspecific_cache_[mypkg] = 1
        return 1
    _isspecific_cache_[mypkg] = 0
    return 0
| 770 | |||
| 771 | |||
| 772 | ####################################################################### | ||
| 773 | |||
| 774 | __pkgsplit_cache__={} | ||
| 775 | |||
def pkgsplit(mypkg, silent=1):

    """This function can be used as a package verification function. If
    it is a valid name, pkgsplit will return a list containing:
    [pkgname, pkgversion(norev), pkgrev ].

    >>> pkgsplit('')
    >>> pkgsplit('x')
    >>> pkgsplit('x-')
    >>> pkgsplit('-1')
    >>> pkgsplit('glibc-1.2-8.9-r7')
    >>> pkgsplit('glibc-2.2.5-r7')
    ['glibc', '2.2.5', 'r7']
    >>> pkgsplit('foo-1.2-1')
    >>> pkgsplit('Mesa-3.0')
    ['Mesa', '3.0', 'r0']
    """

    try:
        return __pkgsplit_cache__[mypkg]
    except KeyError:
        pass

    myparts = string.split(mypkg,'-')
    if len(myparts) < 2:
        if not silent:
            error("package name without name or version part")
        __pkgsplit_cache__[mypkg] = None
        return None
    for x in myparts:
        if len(x) == 0:
            if not silent:
                error("package name with empty name or version part")
            __pkgsplit_cache__[mypkg] = None
            return None
    # verify rev
    revok = 0
    myrev = myparts[-1]
    # Result deliberately ignored: called only so that, with silent=0, any
    # diagnostics about a malformed trailing component get reported.
    ververify(myrev, silent)
    if len(myrev) and myrev[0] == "r":
        try:
            # string.atoi raises ValueError on a non-numeric revision
            string.atoi(myrev[1:])
            revok = 1
        except ValueError:
            pass
    if revok:
        # 'name-version-rN' form
        if ververify(myparts[-2]):
            if len(myparts) == 2:
                # only 'version-rN', no name part
                __pkgsplit_cache__[mypkg] = None
                return None
            else:
                # names can't have versiony looking parts
                for x in myparts[:-2]:
                    if ververify(x):
                        __pkgsplit_cache__[mypkg]=None
                        return None
                myval=[string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
                __pkgsplit_cache__[mypkg]=myval
                return myval
        else:
            __pkgsplit_cache__[mypkg] = None
            return None

    elif ververify(myparts[-1],silent):
        # 'name-version' form; implicit revision r0
        if len(myparts)==1:
            if not silent:
                # BUGFIX: was a raw Python 2 'print' statement; route the
                # diagnostic through error() like every other message here.
                error("Name error in "+mypkg+": missing name part.")
            __pkgsplit_cache__[mypkg]=None
            return None
        else:
            for x in myparts[:-1]:
                if ververify(x):
                    if not silent: error("package name has multiple version parts")
                    __pkgsplit_cache__[mypkg] = None
                    return None
            myval = [string.join(myparts[:-1],"-"), myparts[-1],"r0"]
            __pkgsplit_cache__[mypkg] = myval
            return myval
    else:
        __pkgsplit_cache__[mypkg] = None
        return None
| 857 | |||
| 858 | |||
| 859 | ####################################################################### | ||
| 860 | |||
| 861 | __catpkgsplit_cache__ = {} | ||
| 862 | |||
def catpkgsplit(mydata,silent=1):
    """returns [cat, pkgname, version, rev ]

    >>> catpkgsplit('sys-libs/glibc-1.2-r7')
    ['sys-libs', 'glibc', '1.2', 'r7']
    >>> catpkgsplit('glibc-1.2-r7')
    [None, 'glibc', '1.2', 'r7']
    """

    try:
        return __catpkgsplit_cache__[mydata]
    except KeyError:
        pass

    # BUGFIX: remember the caller's key before rewriting mydata below.
    # The original cached under the rewritten value, so lookups on the
    # argument actually passed in never hit the cache.
    mykey = mydata

    # Reduce the path to '<category>/<basename>' and strip any .bb suffix.
    cat = os.path.basename(os.path.dirname(mydata))
    mydata = os.path.join(cat, os.path.basename(mydata))
    if mydata[-3:] == '.bb':
        mydata = mydata[:-3]

    mysplit = mydata.split("/")
    p_split = None
    splitlen = len(mysplit)
    if splitlen == 1:
        # no category component
        retval = [None]
        p_split = pkgsplit(mydata,silent)
    else:
        retval = [mysplit[splitlen - 2]]
        p_split = pkgsplit(mysplit[splitlen - 1],silent)
    if not p_split:
        __catpkgsplit_cache__[mykey] = None
        return None
    retval.extend(p_split)
    __catpkgsplit_cache__[mykey] = retval
    return retval
| 897 | |||
| 898 | |||
| 899 | ####################################################################### | ||
| 900 | |||
| 901 | __vercmp_cache__ = {} | ||
| 902 | |||
def vercmp(val1,val2):
    """This takes two version strings and returns an integer to tell you whether
    the versions are the same, val1>val2 or val2>val1.  (Only the sign of the
    result is meaningful; it may be a float, as the doctests show.)

    >>> vercmp('1', '2')
    -1.0
    >>> vercmp('2', '1')
    1.0
    >>> vercmp('1', '1.0')
    0
    >>> vercmp('1', '1.1')
    -1.0
    >>> vercmp('1.1', '1_p2')
    1.0
    """

    # quick short-circuit
    if val1 == val2:
        return 0
    valkey = val1+" "+val2

    # cache lookup
    try:
        return __vercmp_cache__[valkey]
    except KeyError:
        # BUGFIX: this reverse lookup used to sit unreachably *after* the
        # return above.  vercmp is antisymmetric, so a cached result for
        # the swapped pair can simply be negated.
        try:
            return - __vercmp_cache__[val2+" "+val1]
        except KeyError:
            pass

    # consider 1_p2 vc 1.1
    # after expansion will become (1_p2,0) vc (1,1)
    # then 1_p2 is compared with 1 before 0 is compared with 1
    # to solve the bug we need to convert it to (1,0_p2)
    # by splitting _prepart part and adding it back _after_expansion

    val1_prepart = val2_prepart = ''
    if val1.count('_'):
        val1, val1_prepart = val1.split('_', 1)
    if val2.count('_'):
        val2, val2_prepart = val2.split('_', 1)

    # replace '-' by '.'
    # FIXME: Is it needed? can val1/2 contain '-'?

    val1 = string.split(val1,'-')
    if len(val1) == 2:
        val1[0] = val1[0] +"."+ val1[1]
    val2 = string.split(val2,'-')
    if len(val2) == 2:
        val2[0] = val2[0] +"."+ val2[1]

    val1 = string.split(val1[0],'.')
    val2 = string.split(val2[0],'.')

    # add back decimal point so that .03 does not become "3" !
    for x in range(1,len(val1)):
        if val1[x][0] == '0' :
            val1[x] = '.' + val1[x]
    for x in range(1,len(val2)):
        if val2[x][0] == '0' :
            val2[x] = '.' + val2[x]

    # extend version numbers
    if len(val2) < len(val1):
        val2.extend(["0"]*(len(val1)-len(val2)))
    elif len(val1) < len(val2):
        val1.extend(["0"]*(len(val2)-len(val1)))

    # add back _prepart tails
    if val1_prepart:
        val1[-1] += '_' + val1_prepart
    if val2_prepart:
        val2[-1] += '_' + val2_prepart
    # The above code will extend version numbers out so they
    # have the same number of digits.  Compare component by component;
    # the first non-zero relparse difference decides.
    for x in range(0,len(val1)):
        cmp1 = relparse(val1[x])
        cmp2 = relparse(val2[x])
        for y in range(0,3):
            myret = cmp1[y] - cmp2[y]
            if myret != 0:
                __vercmp_cache__[valkey] = myret
                return myret
    __vercmp_cache__[valkey] = 0
    return 0
| 990 | |||
| 991 | |||
| 992 | ####################################################################### | ||
| 993 | |||
def pkgcmp(pkg1,pkg2):
    """ Compares two packages, which should have been split via
    pkgsplit(). if the return value val is less than zero, then pkg2 is
    newer than pkg1, zero if equal and positive if older.

    >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r7'])
    0
    >>> pkgcmp(['glibc', '2.2.5', 'r4'], ['glibc', '2.2.5', 'r7'])
    -1
    >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r2'])
    1
    """

    # Version parts decide first; only equal versions fall through to
    # the revision comparison.
    verdict = vercmp(pkg1[1],pkg2[1])
    if verdict > 0:
        return 1
    elif verdict < 0:
        return -1
    # Strip the leading 'r' and compare the revisions numerically.
    rev1 = string.atoi(pkg1[2][1:])
    rev2 = string.atoi(pkg2[2][1:])
    if rev1 > rev2:
        return 1
    elif rev2 > rev1:
        return -1
    return 0
| 1019 | |||
| 1020 | |||
| 1021 | ####################################################################### | ||
| 1022 | |||
def dep_parenreduce(mysplit, mypos=0):
    """Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists:

    >>> dep_parenreduce([''])
    ['']
    >>> dep_parenreduce(['1', '2', '3'])
    ['1', '2', '3']
    >>> dep_parenreduce(['1', '(', '2', '3', ')', '4'])
    ['1', ['2', '3'], '4']

    The list is modified in place (and also returned).
    """

    while mypos < len(mysplit):
        if mysplit[mypos] != "(":
            mypos = mypos + 1
            continue
        # Opening paren: scan ahead for its closing partner, recursing
        # into any nested group encountered on the way.
        openpos = mypos
        mypos = mypos + 1
        while mypos < len(mysplit):
            token = mysplit[mypos]
            if token == ")":
                # Collapse '( ... )' into one nested element and resume
                # scanning from that element's position.
                mysplit[openpos:mypos + 1] = [mysplit[openpos + 1:mypos]]
                mypos = openpos
                break
            if token == "(":
                mysplit = dep_parenreduce(mysplit, mypos)
            mypos = mypos + 1
        mypos = mypos + 1
    return mysplit
| 1049 | |||
| 1050 | |||
def dep_opconvert(mysplit, myuse):
    """Does dependency operator conversion.

    Walks a parenreduce'd token list:
      - recurses into nested sub-lists,
      - rewrites '|| ( a b )' groups into ['||', a, b],
      - evaluates 'flag?' / '!flag?' conditionals against myuse, with an
        optional 'cond? ( x ) : ( y )' colon/alternative form,
      - returns None on a mismatched ')' or a malformed '||' group.
    """

    mypos = 0
    newsplit = []
    while mypos < len(mysplit):
        if type(mysplit[mypos]) == types.ListType:
            # nested group: convert recursively
            newsplit.append(dep_opconvert(mysplit[mypos],myuse))
            mypos += 1
        elif mysplit[mypos] == ")":
            # mismatched paren, error
            return None
        elif mysplit[mypos]=="||":
            if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
                # || must be followed by paren'd list
                return None
            try:
                mynew = dep_opconvert(mysplit[mypos+1],myuse)
            except Exception, e:
                error("unable to satisfy OR dependancy: " + string.join(mysplit," || "))
                raise e
            # tag the converted group by prepending the operator
            mynew[0:0] = ["||"]
            newsplit.append(mynew)
            mypos += 2
        elif mysplit[mypos][-1] == "?":
            # use clause, i.e "gnome? ( foo bar )"
            # this is a quick and dirty hack so that repoman can enable all USE vars:
            if (len(myuse) == 1) and (myuse[0] == "*"):
                # enable it even if it's ! (for repoman) but kill it if it's
                # an arch variable that isn't for this arch. XXX Sparc64?
                # NOTE(review): `settings` is not defined anywhere in this
                # module (portage leftover) -- this branch would raise
                # NameError if ever taken; confirm whether it is dead code.
                if (mysplit[mypos][:-1] not in settings.usemask) or \
                        (mysplit[mypos][:-1]==settings["ARCH"]):
                    enabled=1
                else:
                    enabled=0
            else:
                if mysplit[mypos][0] == "!":
                    # '!flag?': enabled when flag is NOT in myuse
                    myusevar = mysplit[mypos][1:-1]
                    enabled = not myusevar in myuse
                else:
                    # 'flag?': enabled when flag is in myuse
                    myusevar=mysplit[mypos][:-1]
                    enabled = myusevar in myuse
            if (mypos +2 < len(mysplit)) and (mysplit[mypos+2] == ":"):
                # colon mode: 'cond? ( x ) : ( y )'
                if enabled:
                    # choose the first option
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
                    else:
                        newsplit.append(mysplit[mypos+1])
                else:
                    # choose the alternate option
                    # NOTE(review): the type check below inspects
                    # mysplit[mypos+1] but the value appended/converted is
                    # mysplit[mypos+3] -- looks like it should test mypos+3;
                    # confirm before changing.
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+3],myuse))
                    else:
                        newsplit.append(mysplit[mypos+3])
                mypos += 4
            else:
                # normal use mode: keep the guarded item only when enabled
                if enabled:
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
                    else:
                        newsplit.append(mysplit[mypos+1])
                # otherwise, continue
                mypos += 2
        else:
            # normal item
            newsplit.append(mysplit[mypos])
            mypos += 1
    return newsplit
| 1130 | |||
if __name__ == "__main__":
    # Run the doctest examples embedded throughout the bb package.
    import doctest, bb
    doctest.testmod(bb)
diff --git a/bitbake-dev/lib/bb/build.py b/bitbake-dev/lib/bb/build.py new file mode 100644 index 0000000000..ca7cfbc6bb --- /dev/null +++ b/bitbake-dev/lib/bb/build.py | |||
| @@ -0,0 +1,377 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # BitBake 'Build' implementation | ||
| 5 | # | ||
| 6 | # Core code for function execution and task handling in the | ||
| 7 | # BitBake build tools. | ||
| 8 | # | ||
| 9 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 10 | # | ||
| 11 | # Based on Gentoo's portage.py. | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | |||
| 28 | from bb import data, event, mkdirhier, utils | ||
| 29 | import bb, os, sys | ||
| 30 | |||
| 31 | # events | ||
class FuncFailed(Exception):
    """
    Executed function failed
    First parameter a message
    Second parameter is a logfile (optional)
    """
| 38 | |||
class EventException(Exception):
    """Exception which is associated with an Event."""

    def __init__(self, msg, event):
        # Store both pieces as the standard Exception args tuple so that
        # str() and unpacking behave like any other exception.
        self.args = (msg, event)
| 44 | |||
class TaskBase(event.Event):
    """Base class for task events

    Carries the task name, the package (PF) it belongs to, and a
    human-readable message derived from the concrete event class name.
    """

    def __init__(self, t, d ):
        # t: task name; d: the task's datastore
        self._task = t
        self._package = bb.data.getVar("PF", d, 1)
        event.Event.__init__(self, d)
        # [4:] strips the leading "Task" from the event class name, e.g.
        # TaskStarted -> "Started" -- presumes bb.event.getName() returns
        # the bare class name (TODO confirm).
        self._message = "package %s: task %s: %s" % (bb.data.getVar("PF", d, 1), t, bb.event.getName(self)[4:])

    def getTask(self):
        return self._task

    def setTask(self, task):
        self._task = task

    # Expose the task name as a read/write property.
    task = property(getTask, setTask, None, "task property")
| 61 | |||
class TaskStarted(TaskBase):
    """Task execution started"""

class TaskSucceeded(TaskBase):
    """Task execution completed"""

class TaskFailed(TaskBase):
    """Task execution failed"""
    def __init__(self, msg, logfile, t, d ):
        # Keep the failure message and log path on the event so handlers
        # can report where the failing task's output went.
        self.logfile = logfile
        self.msg = msg
        TaskBase.__init__(self, t, d)

class InvalidTask(TaskBase):
    """Invalid Task"""
| 77 | |||
| 78 | # functions | ||
| 79 | |||
def exec_func(func, d, dirs = None):
    """Execute an BB 'function'

    Looks up the function body in the datastore, prepares its working
    directories, redirects stdin/stdout/stderr to per-run log files under
    T, takes any requested lockfiles, then dispatches to
    exec_func_python() or exec_func_shell() depending on the 'python'
    varflag.  The fd redirection is undone in the finally block.
    """

    body = data.getVar(func, d)
    if not body:
        return

    # Normalise the varflags we consult so missing ones read as None.
    flags = data.getVarFlags(func, d)
    for item in ['deps', 'check', 'interactive', 'python', 'cleandirs', 'dirs', 'lockfiles', 'fakeroot']:
        if not item in flags:
            flags[item] = None

    ispython = flags['python']

    # 'cleandirs' are wiped before the function runs.
    cleandirs = (data.expand(flags['cleandirs'], d) or "").split()
    for cdir in cleandirs:
        os.system("rm -rf %s" % cdir)

    # 'dirs' are created; the last one becomes the working directory
    # (falling back to B when none are given).
    if dirs:
        dirs = data.expand(dirs, d)
    else:
        dirs = (data.expand(flags['dirs'], d) or "").split()
    for adir in dirs:
        mkdirhier(adir)

    if len(dirs) > 0:
        adir = dirs[-1]
    else:
        adir = data.getVar('B', d, 1)

    # Save current directory
    try:
        prevdir = os.getcwd()
    except OSError:
        # cwd may have been deleted underneath us
        prevdir = data.getVar('TOPDIR', d, True)

    # Setup logfiles
    t = data.getVar('T', d, 1)
    if not t:
        bb.msg.fatal(bb.msg.domain.Build, "T not set")
    mkdirhier(t)
    # Gross hack, FIXME: random suffix avoids clashes between concurrent
    # runs of the same function in the same T directory.
    import random
    logfile = "%s/log.%s.%s.%s" % (t, func, str(os.getpid()),random.random())
    runfile = "%s/run.%s.%s" % (t, func, str(os.getpid()))

    # Change to correct directory (if specified)
    if adir and os.access(adir, os.F_OK):
        os.chdir(adir)

    # Handle logfiles (file() is the Python 2 builtin alias for open())
    si = file('/dev/null', 'r')
    try:
        # In debug/python mode tee the output so it reaches the console too.
        if bb.msg.debug_level['default'] > 0 or ispython:
            so = os.popen("tee \"%s\"" % logfile, "w")
        else:
            so = file(logfile, 'w')
    except OSError, e:
        # NOTE(review): if the open failed, `so` is unbound here and the
        # `se = so` below raises NameError -- confirm whether a fallback
        # (e.g. so = sys.stdout) was intended.
        bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
        pass

    se = so

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(so.fileno(), oso[1])
    os.dup2(se.fileno(), ose[1])

    # Acquire any lockfiles requested via the 'lockfiles' varflag.
    locks = []
    lockfiles = (data.expand(flags['lockfiles'], d) or "").split()
    for lock in lockfiles:
        locks.append(bb.utils.lockfile(lock))

    try:
        # Run the function
        if ispython:
            exec_func_python(func, d, runfile, logfile)
        else:
            exec_func_shell(func, d, runfile, logfile, flags)

        # Restore original directory
        try:
            os.chdir(prevdir)
        except:
            pass

    finally:

        # Unlock any lockfiles
        for lock in locks:
            bb.utils.unlockfile(lock)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close our logs
        si.close()
        so.close()
        se.close()

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
| 191 | |||
def exec_func_python(func, d, runfile, logfile):
    """Execute a python BB 'function'

    Wraps the function body from the datastore in a def, records the
    generated source in runfile (for post-mortem debugging), then
    compiles and runs it with bb, os and d exposed as globals.
    """
    import os

    bbfile = bb.data.getVar('FILE', d, 1)
    # Wrap the stored body in a function definition and invoke it.
    tmp = "def " + func + "():\n%s" % data.getVar(func, d)
    tmp += '\n' + func + '()'

    # BUGFIX: the original leaked this file handle without closing it,
    # so the runfile could remain empty/truncated until interpreter exit.
    f = open(runfile, "w")
    try:
        f.write(tmp)
    finally:
        f.close()

    comp = utils.better_compile(tmp, func, bbfile)
    g = {} # globals
    g['bb'] = bb
    g['os'] = os
    g['d'] = d
    utils.better_exec(comp, g, tmp, bbfile)
| 208 | |||
| 209 | |||
| 210 | def exec_func_shell(func, d, runfile, logfile, flags): | ||
| 211 | """Execute a shell BB 'function' Returns true if execution was successful. | ||
| 212 | |||
| 213 | For this, it creates a bash shell script in the tmp dectory, writes the local | ||
| 214 | data into it and finally executes. The output of the shell will end in a log file and stdout. | ||
| 215 | |||
| 216 | Note on directory behavior. The 'dirs' varflag should contain a list | ||
| 217 | of the directories you need created prior to execution. The last | ||
| 218 | item in the list is where we will chdir/cd to. | ||
| 219 | """ | ||
| 220 | |||
| 221 | deps = flags['deps'] | ||
| 222 | check = flags['check'] | ||
| 223 | if check in globals(): | ||
| 224 | if globals()[check](func, deps): | ||
| 225 | return | ||
| 226 | |||
| 227 | f = open(runfile, "w") | ||
| 228 | f.write("#!/bin/sh -e\n") | ||
| 229 | if bb.msg.debug_level['default'] > 0: f.write("set -x\n") | ||
| 230 | data.emit_env(f, d) | ||
| 231 | |||
| 232 | f.write("cd %s\n" % os.getcwd()) | ||
| 233 | if func: f.write("%s\n" % func) | ||
| 234 | f.close() | ||
| 235 | os.chmod(runfile, 0775) | ||
| 236 | if not func: | ||
| 237 | bb.msg.error(bb.msg.domain.Build, "Function not specified") | ||
| 238 | raise FuncFailed("Function not specified for exec_func_shell") | ||
| 239 | |||
| 240 | # execute function | ||
| 241 | if flags['fakeroot']: | ||
| 242 | maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1) | ||
| 243 | else: | ||
| 244 | maybe_fakeroot = '' | ||
| 245 | lang_environment = "LC_ALL=C " | ||
| 246 | ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile)) | ||
| 247 | |||
| 248 | if ret == 0: | ||
| 249 | return | ||
| 250 | |||
| 251 | bb.msg.error(bb.msg.domain.Build, "Function %s failed" % func) | ||
| 252 | raise FuncFailed("function %s failed" % func, logfile) | ||
| 253 | |||
| 254 | |||
def exec_task(task, d):
    """Execute a BB 'task'

    The primary difference between executing a task versus executing
    a function is that a task exists in the task digraph, and therefore
    has dependencies amongst other tasks.

    Fires TaskStarted/TaskSucceeded/TaskFailed events, runs the task in
    a datastore copy with task_<name> prepended to OVERRIDES, and writes
    a stamp file on success (unless nostamp/selfstamp is set).
    """

    # Check whether this is a valid task
    if not data.getVarFlag(task, 'task', d):
        raise EventException("No such task", InvalidTask(task, d))

    try:
        bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
        # Run with task-specific overrides on a copy, leaving d untouched.
        old_overrides = data.getVar('OVERRIDES', d, 0)
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', 'task_%s:%s' % (task, old_overrides), localdata)
        data.update_data(localdata)
        event.fire(TaskStarted(task, localdata))
        exec_func(task, localdata)
        event.fire(TaskSucceeded(task, localdata))
    except FuncFailed, message:
        # Try to extract the optional logfile
        # (relies on Python 2 exceptions being unpackable via their args)
        try:
            (msg, logfile) = message
        except:
            logfile = None
            msg = message
        bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message )
        # NOTE(review): the failure event is built from d, not localdata,
        # unlike the started/succeeded events above -- confirm intended.
        failedevent = TaskFailed(msg, logfile, task, d)
        event.fire(failedevent)
        raise EventException("Function failed in task: %s" % message, failedevent)

    # make stamp, or cause event and raise exception
    if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
        make_stamp(task, d)
| 290 | |||
def extract_stamp(d, fn):
    """
    Extracts the stamp base path from either a data dictionary (fn unset)
    or a dataCache entry (fn set).
    """
    # A dataCache is indexed by filename; a plain datastore carries the
    # stamp base in its STAMP variable.
    if not fn:
        return data.getVar('STAMP', d, 1)
    return d.stamp[fn]
| 299 | |||
def stamp_internal(task, d, file_name):
    """
    Internal stamp helper function.
    Removes any existing stamp for the given task, makes sure the stamp
    directory exists, and returns the stamp path+filename (or None when
    no stamp base is configured).
    """
    base = extract_stamp(d, file_name)
    if not base:
        return None
    stampfile = "%s.%s" % (base, task)
    mkdirhier(os.path.dirname(stampfile))
    # Delete and let the caller recreate: forces a timestamp change even
    # on broken NFS filesystems.
    if os.access(stampfile, os.F_OK):
        os.remove(stampfile)
    return stampfile
| 317 | |||
def make_stamp(task, d, file_name = None):
    """
    Creates/updates a stamp for a given task.
    (d can be a data dict or dataCache)
    """
    stampfile = stamp_internal(task, d, file_name)
    if not stampfile:
        return
    # Touch the stamp: open for write and close immediately.
    open(stampfile, "w").close()
| 327 | |||
def del_stamp(task, d, file_name = None):
    """
    Removes a stamp for a given task
    (d can be a data dict or dataCache)

    Relies on stamp_internal's side effect of removing any existing
    stamp file; the returned path is deliberately unused here.
    """
    stamp_internal(task, d, file_name)
| 334 | |||
def add_tasks(tasklist, d):
    """
    Register each task in tasklist with the datastore d (via the 'task'
    flag) and accumulate inter-task dependency information into the
    '_task_deps' variable.
    """
    task_deps = data.getVar('_task_deps', d)
    if not task_deps:
        task_deps = {}
    if not 'tasks' in task_deps:
        task_deps['tasks'] = []
    if not 'parents' in task_deps:
        task_deps['parents'] = {}

    for task in tasklist:
        task = data.expand(task, d)
        data.setVarFlag(task, 'task', 1, d)

        if not task in task_deps['tasks']:
            task_deps['tasks'].append(task)

        flags = data.getVarFlags(task, d)
        # Copy the named flag (if set on this task) into
        # task_deps[name][task], expanding variable references.
        # Closes over 'task' and 'flags' of the current loop iteration.
        def getTask(name):
            if not name in task_deps:
                task_deps[name] = {}
            if name in flags:
                deptask = data.expand(flags[name], d)
                task_deps[name][task] = deptask
        getTask('depends')
        getTask('deptask')
        getTask('rdeptask')
        getTask('recrdeptask')
        getTask('nostamp')
        # NOTE(review): assumes every task has a 'deps' flag set by the
        # parser - confirm; a missing flag would raise KeyError here.
        task_deps['parents'][task] = []
        for dep in flags['deps']:
            dep = data.expand(dep, d)
            task_deps['parents'][task].append(dep)

    # don't assume holding a reference
    data.setVar('_task_deps', task_deps, d)
| 370 | |||
def remove_task(task, kill, d):
    """Remove a BB 'task'.

       If kill is 1, also remove tasks that depend on this task."""

    # NOTE(review): only the 'task' flag is cleared here; despite the
    # docstring, no dependent-task removal is implemented for kill=1
    # in this version.
    data.delVarFlag(task, 'task', d)
| 377 | |||
diff --git a/bitbake-dev/lib/bb/cache.py b/bitbake-dev/lib/bb/cache.py new file mode 100644 index 0000000000..bcf393a578 --- /dev/null +++ b/bitbake-dev/lib/bb/cache.py | |||
| @@ -0,0 +1,465 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # BitBake 'Event' implementation | ||
| 5 | # | ||
| 6 | # Caching of bitbake variables before task execution | ||
| 7 | |||
| 8 | # Copyright (C) 2006 Richard Purdie | ||
| 9 | |||
| 10 | # but small sections based on code from bin/bitbake: | ||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 13 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
| 14 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
| 15 | # Copyright (C) 2005 ROAD GmbH | ||
| 16 | # | ||
| 17 | # This program is free software; you can redistribute it and/or modify | ||
| 18 | # it under the terms of the GNU General Public License version 2 as | ||
| 19 | # published by the Free Software Foundation. | ||
| 20 | # | ||
| 21 | # This program is distributed in the hope that it will be useful, | ||
| 22 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 23 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 24 | # GNU General Public License for more details. | ||
| 25 | # | ||
| 26 | # You should have received a copy of the GNU General Public License along | ||
| 27 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 28 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 29 | |||
| 30 | |||
| 31 | import os, re | ||
| 32 | import bb.data | ||
| 33 | import bb.utils | ||
| 34 | from sets import Set | ||
| 35 | |||
| 36 | try: | ||
| 37 | import cPickle as pickle | ||
| 38 | except ImportError: | ||
| 39 | import pickle | ||
| 40 | bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.") | ||
| 41 | |||
| 42 | __cache_version__ = "128" | ||
| 43 | |||
class Cache:
    """
    BitBake Cache implementation

    Persists, in a pickled file (bb_cache.dat), the set of variables each
    parsed .bb file was observed to access, together with the mtimes
    needed to decide later whether a cached entry is still valid.
    """
    def __init__(self, cooker):


        # CACHE names the directory holding bb_cache.dat; unset disables caching
        self.cachedir = bb.data.getVar("CACHE", cooker.configuration.data, True)
        self.clean = {}           # fn -> "": files whose cache entry is known valid
        self.checked = {}         # fn -> "": files already examined by cacheValidUpdate
        self.depends_cache = {}   # fn -> {variable name: value}
        self.data = None          # live datastore being recorded (see setData)
        self.data_fn = None       # fn that self.data belongs to
        self.cacheclean = True    # False once any uncached variable access occurs

        if self.cachedir in [None, '']:
            self.has_cache = False
            bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.")
        else:
            self.has_cache = True
            self.cachefile = os.path.join(self.cachedir,"bb_cache.dat")

            bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
            try:
                os.stat( self.cachedir )
            except OSError:
                bb.mkdirhier( self.cachedir )

        if not self.has_cache:
            return

        # If any of configuration.data's dependencies are newer than the
        # cache there isn't even any point in loading it...
        newest_mtime = 0
        deps = bb.data.getVar("__depends", cooker.configuration.data, True)
        for f,old_mtime in deps:
            if old_mtime > newest_mtime:
                newest_mtime = old_mtime

        if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
            try:
                p = pickle.Unpickler(file(self.cachefile, "rb"))
                self.depends_cache, version_data = p.load()
                if version_data['CACHE_VER'] != __cache_version__:
                    raise ValueError, 'Cache Version Mismatch'
                if version_data['BITBAKE_VER'] != bb.__version__:
                    raise ValueError, 'Bitbake Version Mismatch'
            except EOFError:
                bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
                self.depends_cache = {}
            except:
                # Deliberately broad: any unpickling or version problem just
                # means the cache is rebuilt from scratch.
                bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
                self.depends_cache = {}
        else:
            bb.msg.note(1, bb.msg.domain.Cache, "Out of date cache found, rebuilding...")

    def getVar(self, var, fn, exp = 0):
        """
        Gets the value of a variable
        (similar to getVar in the data class)

        There are two scenarios:
          1. We have cached data - serve from depends_cache[fn]
          2. We're learning what data to cache - serve from data
             backend but add a copy of the data to the cache.
        """
        if fn in self.clean:
            return self.depends_cache[fn][var]

        if not fn in self.depends_cache:
            self.depends_cache[fn] = {}

        if fn != self.data_fn:
            # We're trying to access data in the cache which doesn't exist
            # yet setData hasn't been called to setup the right access. Very bad.
            bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))

        # Any access that falls through to the live datastore means the
        # on-disk cache no longer reflects everything we know.
        self.cacheclean = False
        result = bb.data.getVar(var, self.data, exp)
        self.depends_cache[fn][var] = result
        return result

    def setData(self, fn, data):
        """
        Called to prime bb_cache ready to learn which variables to cache.
        Will be followed by calls to self.getVar which aren't cached
        but can be fulfilled from self.data.
        """
        self.data_fn = fn
        self.data = data

        # Make sure __depends makes the depends_cache
        self.getVar("__depends", fn, True)
        # Record fn's own mtime so cacheValidUpdate can detect edits later.
        self.depends_cache[fn]["CACHETIMESTAMP"] = bb.parse.cached_mtime(fn)

    def loadDataFull(self, fn, cfgData):
        """
        Return a complete set of data for fn.
        To do this, we need to parse the file.
        """
        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s (full)" % fn)

        bb_data, skipped = self.load_bbfile(fn, cfgData)
        return bb_data

    def loadData(self, fn, cfgData):
        """
        Load a subset of data for fn.
        If the cached data is valid we do nothing,
        To do this, we need to parse the file and set the system
        to record the variables accessed.
        Return the cache status and whether the file was skipped when parsed
        """
        if fn not in self.checked:
            self.cacheValidUpdate(fn)
        if self.cacheValid(fn):
            if "SKIPPED" in self.depends_cache[fn]:
                return True, True
            return True, False

        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s" % fn)

        bb_data, skipped = self.load_bbfile(fn, cfgData)
        self.setData(fn, bb_data)
        return False, skipped

    def cacheValid(self, fn):
        """
        Is the cache valid for fn?
        Fast version, no timestamps checked.
        """
        # Is cache enabled?
        if not self.has_cache:
            return False
        if fn in self.clean:
            return True
        return False

    def cacheValidUpdate(self, fn):
        """
        Is the cache valid for fn?
        Make thorough (slower) checks including timestamps.
        """
        # Is cache enabled?
        if not self.has_cache:
            return False

        self.checked[fn] = ""

        # Pretend we're clean so getVar works
        self.clean[fn] = ""

        # File isn't in depends_cache
        if not fn in self.depends_cache:
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn)
            # remove() also undoes the provisional self.clean entry above
            self.remove(fn)
            return False

        mtime = bb.parse.cached_mtime_noerror(fn)

        # Check file still exists (cached_mtime_noerror returns 0 on error)
        if mtime == 0:
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
            self.remove(fn)
            return False

        # Check the file's timestamp against the one recorded by setData
        if mtime != self.getVar("CACHETIMESTAMP", fn, True):
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
            self.remove(fn)
            return False

        # Check dependencies are still valid
        depends = self.getVar("__depends", fn, True)
        if depends:
            for f,old_mtime in depends:
                fmtime = bb.parse.cached_mtime_noerror(f)
                # Check if file still exists
                if fmtime == 0:
                    self.remove(fn)
                    return False

                if (fmtime != old_mtime):
                    bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
                    self.remove(fn)
                    return False

        #bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
        if not fn in self.clean:
            self.clean[fn] = ""

        return True

    def skip(self, fn):
        """
        Mark a fn as skipped
        Called from the parser
        """
        if not fn in self.depends_cache:
            self.depends_cache[fn] = {}
        self.depends_cache[fn]["SKIPPED"] = "1"

    def remove(self, fn):
        """
        Remove a fn from the cache
        Called from the parser in error cases
        """
        bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn)
        if fn in self.depends_cache:
            del self.depends_cache[fn]
        if fn in self.clean:
            del self.clean[fn]

    def sync(self):
        """
        Save the cache
        Called from the parser when complete (or exiting)
        """

        if not self.has_cache:
            return

        # Nothing was learnt this run, so the on-disk copy is still current.
        if self.cacheclean:
            bb.msg.note(1, bb.msg.domain.Cache, "Cache is clean, not saving.")
            return

        version_data = {}
        version_data['CACHE_VER'] = __cache_version__
        version_data['BITBAKE_VER'] = bb.__version__

        # Highest pickle protocol (-1) for compactness/speed.
        p = pickle.Pickler(file(self.cachefile, "wb" ), -1 )
        p.dump([self.depends_cache, version_data])

    def mtime(self, cachefile):
        # Thin wrapper; returns 0 when the file cannot be stat'd.
        return bb.parse.cached_mtime_noerror(cachefile)

    def handle_data(self, file_name, cacheData):
        """
        Save data we need into the cache
        """

        pn = self.getVar('PN', file_name, True)
        pe = self.getVar('PE', file_name, True) or "0"
        pv = self.getVar('PV', file_name, True)
        pr = self.getVar('PR', file_name, True)
        dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
        depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
        packages = (self.getVar('PACKAGES', file_name, True) or "").split()
        packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
        rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()

        cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)

        # build PackageName to FileName lookup table
        if pn not in cacheData.pkg_pn:
            cacheData.pkg_pn[pn] = []
        cacheData.pkg_pn[pn].append(file_name)

        cacheData.stamp[file_name] = self.getVar('STAMP', file_name, True)

        # build FileName to PackageName lookup table
        cacheData.pkg_fn[file_name] = pn
        cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
        cacheData.pkg_dp[file_name] = dp

        # A recipe always provides its own PN, plus anything in PROVIDES.
        provides = [pn]
        for provide in (self.getVar("PROVIDES", file_name, True) or "").split():
            if provide not in provides:
                provides.append(provide)

        # Build forward and reverse provider hashes
        # Forward: virtual -> [filenames]
        # Reverse: PN -> [virtuals]
        if pn not in cacheData.pn_provides:
            cacheData.pn_provides[pn] = []

        cacheData.fn_provides[file_name] = provides
        for provide in provides:
            if provide not in cacheData.providers:
                cacheData.providers[provide] = []
            cacheData.providers[provide].append(file_name)
            if not provide in cacheData.pn_provides[pn]:
                cacheData.pn_provides[pn].append(provide)

        cacheData.deps[file_name] = []
        for dep in depends:
            if not dep in cacheData.deps[file_name]:
                cacheData.deps[file_name].append(dep)
            if not dep in cacheData.all_depends:
                cacheData.all_depends.append(dep)

        # Build reverse hash for PACKAGES, so runtime dependencies
        # can be be resolved (RDEPENDS, RRECOMMENDS etc.)
        for package in packages:
            if not package in cacheData.packages:
                cacheData.packages[package] = []
            cacheData.packages[package].append(file_name)
            rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()

        for package in packages_dynamic:
            if not package in cacheData.packages_dynamic:
                cacheData.packages_dynamic[package] = []
            cacheData.packages_dynamic[package].append(file_name)

        for rprovide in rprovides:
            if not rprovide in cacheData.rproviders:
                cacheData.rproviders[rprovide] = []
            cacheData.rproviders[rprovide].append(file_name)

        # Build hash of runtime depends and recommends

        if not file_name in cacheData.rundeps:
            cacheData.rundeps[file_name] = {}
        if not file_name in cacheData.runrecs:
            cacheData.runrecs[file_name] = {}

        rdepends = self.getVar('RDEPENDS', file_name, True) or ""
        rrecommends = self.getVar('RRECOMMENDS', file_name, True) or ""
        for package in packages + [pn]:
            if not package in cacheData.rundeps[file_name]:
                cacheData.rundeps[file_name][package] = []
            if not package in cacheData.runrecs[file_name]:
                cacheData.runrecs[file_name][package] = []

            # NOTE(review): the empty lists created just above are
            # immediately overwritten with strings here - the list
            # initialisation looks redundant; confirm before changing.
            cacheData.rundeps[file_name][package] = rdepends + " " + (self.getVar("RDEPENDS_%s" % package, file_name, True) or "")
            cacheData.runrecs[file_name][package] = rrecommends + " " + (self.getVar("RRECOMMENDS_%s" % package, file_name, True) or "")

        # Collect files we may need for possible world-dep
        # calculations
        if not self.getVar('BROKEN', file_name, True) and not self.getVar('EXCLUDE_FROM_WORLD', file_name, True):
            cacheData.possible_world.append(file_name)


    def load_bbfile( self, bbfile , config):
        """
        Load and parse one .bb build file
        Return the data and whether parsing resulted in the file being skipped
        """

        import bb
        from bb import utils, data, parse, debug, event, fatal

        # expand tmpdir to include this topdir
        data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        oldpath = os.path.abspath(os.getcwd())
        # chdir into the recipe's directory (if it exists) so relative
        # paths in the recipe resolve; always restore the old cwd.
        if bb.parse.cached_mtime_noerror(bbfile_loc):
            os.chdir(bbfile_loc)
        bb_data = data.init_db(config)
        try:
            bb_data = parse.handle(bbfile, bb_data) # read .bb data
            os.chdir(oldpath)
            return bb_data, False
        except bb.parse.SkipPackage:
            os.chdir(oldpath)
            return bb_data, True
        except:
            os.chdir(oldpath)
            raise
| 403 | |||
def init(cooker):
    """
    The Objective: Cache the minimum amount of data possible yet get to the
    stage of building packages (i.e. tryBuild) without reparsing any .bb files.

    To do this, we intercept getVar calls and only cache the variables we see
    being accessed. We rely on the cache getVar calls being made for all
    variables bitbake might need to use to reach this stage. For each cached
    file we need to track:

    * Its mtime
    * The mtimes of all its dependencies
    * Whether it caused a parse.SkipPackage exception

    Files causing parsing errors are evicted from the cache.

    Returns a new Cache instance bound to the given cooker.
    """
    return Cache(cooker)
| 422 | |||
| 423 | |||
| 424 | |||
| 425 | #============================================================================# | ||
| 426 | # CacheData | ||
| 427 | #============================================================================# | ||
class CacheData:
    """
    The data structures we compile from the cached data
    """

    def __init__(self):
        # Direct cache variables, populated by Cache.handle_data
        # (grouped and alphabetized for easy scanning).
        self.all_depends = []
        self.deps = {}
        self.fn_provides = {}
        self.packages = {}
        self.packages_dynamic = {}
        self.pkg_dp = {}
        self.pkg_fn = {}
        self.pkg_pepvpr = {}
        self.pkg_pn = {}
        self.pn_provides = {}
        self.possible_world = []
        self.preferred = {}
        self.providers = {}
        self.rproviders = {}
        self.rundeps = {}
        self.runrecs = {}
        self.stamp = {}
        self.task_deps = {}
        self.task_queues = {}

        # Indirect cache variables, filled in elsewhere.
        self.bbfile_config_priorities = []
        self.bbfile_priority = {}
        self.ignored_dependencies = []
        self.world_target = Set()
diff --git a/bitbake-dev/lib/bb/command.py b/bitbake-dev/lib/bb/command.py new file mode 100644 index 0000000000..8384e89e55 --- /dev/null +++ b/bitbake-dev/lib/bb/command.py | |||
| @@ -0,0 +1,211 @@ | |||
| 1 | """ | ||
| 2 | BitBake 'Command' module | ||
| 3 | |||
| 4 | Provide an interface to interact with the bitbake server through 'commands' | ||
| 5 | """ | ||
| 6 | |||
| 7 | # Copyright (C) 2006-2007 Richard Purdie | ||
| 8 | # | ||
| 9 | # This program is free software; you can redistribute it and/or modify | ||
| 10 | # it under the terms of the GNU General Public License version 2 as | ||
| 11 | # published by the Free Software Foundation. | ||
| 12 | # | ||
| 13 | # This program is distributed in the hope that it will be useful, | ||
| 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | # GNU General Public License for more details. | ||
| 17 | # | ||
| 18 | # You should have received a copy of the GNU General Public License along | ||
| 19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 21 | |||
| 22 | """ | ||
| 23 | The bitbake server takes 'commands' from its UI/commandline. | ||
| 24 | Commands are either 'online' of 'offline' in nature. | ||
| 25 | Offline commands return data to the client in the form of events. | ||
| 26 | Online commands must only return data through the function return value | ||
| 27 | and must not trigger events, directly or indirectly. | ||
| 28 | Commands are queued in a CommandQueue | ||
| 29 | """ | ||
| 30 | |||
| 31 | import bb | ||
| 32 | |||
| 33 | offline_cmds = {} | ||
| 34 | online_cmds = {} | ||
| 35 | |||
class Command:
    """
    Dispatches 'commands' arriving from the server/UI.

    'Online' commands run immediately and return their result; at most
    one 'offline' command may be in flight at a time, and it reports
    its completion back via events (see finishOfflineCommand).
    """
    def __init__(self, cooker):

        self.cooker = cooker
        self.cmds_online = CommandsOnline()
        self.cmds_offline = CommandsOffline()

        # FIXME Add lock for this
        self.currentOfflineCommand = None

        # Build the lowercase-name -> method registries. Skip
        # non-callable class attributes (__doc__, __module__, ...) so
        # only genuine command handlers are registered; previously the
        # class docstring etc. ended up in these tables as bogus entries.
        for attr in CommandsOnline.__dict__:
            method = getattr(CommandsOnline, attr)
            if not callable(method):
                continue
            online_cmds[attr.lower()] = method

        for attr in CommandsOffline.__dict__:
            method = getattr(CommandsOffline, attr)
            if not callable(method):
                continue
            offline_cmds[attr.lower()] = method

    def runCommand(self, commandline):
        """
        Run the command named by commandline[0], with the remaining
        entries as its parameters.  Online commands execute at once and
        return their result; an offline command is only queued (True is
        returned).  Returns an error string when the command is unknown
        or another offline command is already in progress.
        """
        try:
            command = commandline.pop(0)
            if command in CommandsOnline.__dict__:
                # Can run online commands straight away
                return getattr(CommandsOnline, command)(self.cmds_online, self, commandline)
            if self.currentOfflineCommand is not None:
                return "Busy (%s in progress)" % self.currentOfflineCommand[0]
            if command not in CommandsOffline.__dict__:
                return "No such command"
            self.currentOfflineCommand = (command, commandline)
            return True
        except:
            # Deliberately broad: hand any failure back to the caller
            # as a traceback string instead of killing the server.
            import traceback
            return traceback.format_exc()

    def runOfflineCommand(self):
        """
        Execute the queued offline command, if any.  Failures are
        reported through finishOfflineCommand as a traceback string.
        """
        try:
            if self.currentOfflineCommand is not None:
                (command, options) = self.currentOfflineCommand
                getattr(CommandsOffline, command)(self.cmds_offline, self, options)
        except:
            import traceback
            self.finishOfflineCommand(traceback.format_exc())

    def finishOfflineCommand(self, error = None):
        """
        Mark the current offline command finished, firing a
        CookerCommandFailed event when error is set, otherwise a
        CookerCommandCompleted event.
        """
        if error:
            bb.event.fire(bb.command.CookerCommandFailed(self.cooker.configuration.event_data, error))
        else:
            bb.event.fire(bb.command.CookerCommandCompleted(self.cooker.configuration.event_data))
        self.currentOfflineCommand = None
| 90 | |||
| 91 | |||
class CommandsOnline:
    """
    A class of online commands
    These should run quickly so as not to hurt interactive performance.
    These must not influence any running offline command.

    Each handler receives the dispatching Command instance and the
    remaining commandline entries as its parameters.
    """

    def stateShutdown(self, command, params):
        """
        Trigger cooker 'shutdown' mode
        """
        command.cooker.cookerAction = bb.cooker.cookerShutdown

    def stateStop(self, command, params):
        """
        Stop the cooker
        """
        command.cooker.cookerAction = bb.cooker.cookerStop

    def getCmdLineAction(self, command, params):
        """
        Get any command parsed from the commandline
        """
        return command.cooker.commandlineAction

    def readVariable(self, command, params):
        """
        Read the value of a variable from configuration.data
        """
        varname = params[0]
        expand = True
        if len(params) > 1:
            # NOTE(review): may arrive as a string from the UI rather
            # than a bool - confirm against the callers.
            expand = params[1]

        return bb.data.getVar(varname, command.cooker.configuration.data, expand)
| 127 | |||
class CommandsOffline:
    """
    A class of offline commands
    These functions communicate via generated events.
    Any function that requires metadata parsing should be here.

    Each handler receives the dispatching Command instance and the
    queued commandline entries as its parameters.
    """

    def buildFile(self, command, params):
        """
        Build a single specified .bb file
        """
        bfile = params[0]
        task = params[1]

        # NOTE(review): unlike the handlers below, no
        # finishOfflineCommand() here - presumably the build completion
        # path signals completion; confirm against the cooker.
        command.cooker.buildFile(bfile, task)

    def buildTargets(self, command, params):
        """
        Build a set of targets
        """
        pkgs_to_build = params[0]

        # NOTE(review): completion is signalled elsewhere, as with buildFile.
        command.cooker.buildTargets(pkgs_to_build)

    def generateDepTreeEvent(self, command, params):
        """
        Generate an event containing the dependency information
        """
        pkgs_to_build = params[0]

        command.cooker.generateDepTreeEvent(pkgs_to_build)
        command.finishOfflineCommand()

    def generateDotGraph(self, command, params):
        """
        Dump dependency information to disk as .dot files
        """
        pkgs_to_build = params[0]

        command.cooker.generateDotGraphFiles(pkgs_to_build)
        command.finishOfflineCommand()

    def showVersions(self, command, params):
        """
        Show the currently selected versions
        """
        command.cooker.showVersions()
        command.finishOfflineCommand()

    def showEnvironment(self, command, params):
        """
        Print the environment
        """
        bfile = params[0]
        pkg = params[1]

        command.cooker.showEnvironment(bfile, pkg)
        command.finishOfflineCommand()

    def parseFiles(self, command, params):
        """
        Parse the .bb files
        """
        command.cooker.updateCache()
        command.finishOfflineCommand()
| 193 | |||
| 194 | # | ||
| 195 | # Events | ||
| 196 | # | ||
class CookerCommandCompleted(bb.event.Event):
    """
    Cooker command completed

    Fired by Command.finishOfflineCommand when an offline command
    finishes without error.
    """
    def __init__(self, data):
        bb.event.Event.__init__(self, data)
| 203 | |||
| 204 | |||
class CookerCommandFailed(bb.event.Event):
    """
    Cooker command failed

    Fired by Command.finishOfflineCommand when an offline command
    fails; 'error' carries the failure description (traceback string).
    """
    def __init__(self, data, error):
        bb.event.Event.__init__(self, data)
        self.error = error
diff --git a/bitbake-dev/lib/bb/cooker.py b/bitbake-dev/lib/bb/cooker.py new file mode 100644 index 0000000000..c92ad70a2c --- /dev/null +++ b/bitbake-dev/lib/bb/cooker.py | |||
| @@ -0,0 +1,941 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | # | ||
| 5 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 6 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 7 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
| 8 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
| 9 | # Copyright (C) 2005 ROAD GmbH | ||
| 10 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | |||
| 25 | import sys, os, getopt, glob, copy, os.path, re, time | ||
| 26 | import bb | ||
| 27 | from bb import utils, data, parse, event, cache, providers, taskdata, runqueue | ||
| 28 | from bb import xmlrpcserver, command | ||
| 29 | from sets import Set | ||
| 30 | import itertools, sre_constants | ||
| 31 | |||
class MultipleMatches(Exception):
    """Raised when multiple file matches are found where one was expected."""
| 36 | |||
class ParsingErrorsFound(Exception):
    """Raised when errors are encountered while parsing metadata."""
| 41 | |||
class NothingToBuild(Exception):
    """Raised when there is nothing to build."""
| 46 | |||
| 47 | |||
# Parse states the cooker can be in; updateCache() moves the cooker
# from cookerClean to cookerParsed once the recipe cache is built.
cookerClean = 1
cookerParsed = 2

# Different action states the cooker can be in
cookerRun = 1 # Cooker is running normally
cookerShutdown = 2 # Active tasks should be brought to a controlled stop
cookerStop = 3 # Stop, now!
| 56 | |||
| 57 | #============================================================================# | ||
| 58 | # BBCooker | ||
| 59 | #============================================================================# | ||
| 60 | class BBCooker: | ||
| 61 | """ | ||
| 62 | Manages one bitbake build run | ||
| 63 | """ | ||
| 64 | |||
    def __init__(self, configuration):
        """
        Create a cooker for one build run.

        configuration: an options holder carrying the command line / UI
        settings (verbose, debug, file, cmd, pkgs_to_build, ...).

        Parses the configuration files, configures messaging, creates the
        XMLRPC server, fixes up the terminal and nice level, and finally
        translates the options into self.commandlineAction, a
        [command_name, args...] list consumed by the command layer.
        """
        # Parsed recipe metadata; populated later by updateCache()
        self.status = None

        self.cache = None
        self.bb_cache = None

        # XMLRPC server through which UIs drive this cooker
        self.server = bb.xmlrpcserver.BitBakeXMLRPCServer(self)
        #self.server.register_function(self.showEnvironment)

        self.configuration = configuration

        if self.configuration.verbose:
            bb.msg.set_verbose(True)

        if self.configuration.debug:
            bb.msg.set_debug_level(self.configuration.debug)
        else:
            bb.msg.set_debug_level(0)

        if self.configuration.debug_domains:
            bb.msg.set_debug_domains(self.configuration.debug_domains)

        self.configuration.data = bb.data.init()

        # Extra configuration files from the command line are parsed
        # before the main bitbake.conf
        for f in self.configuration.file:
            self.parseConfigurationFile( f )

        self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )

        # Fall back to BB_DEFAULT_TASK (or "build") when no task was given
        if not self.configuration.cmd:
            self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"

        # BBPKGS can supply additional default build targets
        bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True)
        if bbpkgs:
            self.configuration.pkgs_to_build.extend(bbpkgs.split())

        #
        # Special updated configuration we use for firing events
        #
        self.configuration.event_data = bb.data.createCopy(self.configuration.data)
        bb.data.update_data(self.configuration.event_data)

        # TOSTOP must not be set or our children will hang when they output
        fd = sys.stdout.fileno()
        if os.isatty(fd):
            import termios
            tcattr = termios.tcgetattr(fd)
            if tcattr[3] & termios.TOSTOP:
                bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...")
                tcattr[3] = tcattr[3] & ~termios.TOSTOP
                termios.tcsetattr(fd, termios.TCSANOW, tcattr)

        # Change nice level if we're asked to
        nice = bb.data.getVar("BB_NICE_LEVEL", self.configuration.data, True)
        if nice:
            curnice = os.nice(0)
            nice = int(nice) - curnice
            bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(nice))

        # Parse any commandline into actions
        if self.configuration.show_environment:
            self.commandlineAction = None

            if 'world' in self.configuration.pkgs_to_build:
                bb.error("'world' is not a valid target for --environment.")
            elif len(self.configuration.pkgs_to_build) > 1:
                bb.error("Only one target can be used with the --environment option.")
            elif self.configuration.buildfile and len(self.configuration.pkgs_to_build) > 0:
                bb.error("No target should be used with the --environment and --buildfile options.")
            else:
                self.commandlineAction = ["showEnvironment", self.configuration.buildfile, self.configuration.pkgs_to_build]
        elif self.configuration.buildfile is not None:
            self.commandlineAction = ["buildFile", self.configuration.buildfile, self.configuration.cmd]
        elif self.configuration.show_versions:
            self.commandlineAction = ["showVersions"]
        elif self.configuration.parse_only:
            self.commandlineAction = ["parseFiles"]
        elif self.configuration.dot_graph:
            if self.configuration.pkgs_to_build:
                self.commandlineAction = ["generateDotGraph", self.configuration.pkgs_to_build]
            else:
                self.commandlineAction = None
                bb.error("Please specify a package name for dependency graph generation.")
        else:
            if self.configuration.pkgs_to_build:
                self.commandlineAction = ["buildTargets", self.configuration.pkgs_to_build]
            else:
                self.commandlineAction = None
                bb.error("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")

        # FIXME - implement
        #if self.configuration.interactive:
        #    self.interactiveMode()

        # Command dispatcher plus idle/state-machine bookkeeping
        self.command = bb.command.Command(self)
        self.cookerIdle = True
        self.cookerState = cookerClean
        self.cookerAction = cookerRun
        self.server.register_idle_function(self.runCommands, self)
| 164 | |||
| 165 | |||
| 166 | def runCommands(self, server, data, abort): | ||
| 167 | """ | ||
| 168 | Run any queued offline command | ||
| 169 | This is done by the idle handler so it runs in true context rather than | ||
| 170 | tied to any UI. | ||
| 171 | """ | ||
| 172 | if self.cookerIdle and not abort: | ||
| 173 | self.command.runOfflineCommand() | ||
| 174 | |||
| 175 | # Always reschedule | ||
| 176 | return True | ||
| 177 | |||
    def tryBuildPackage(self, fn, item, task, the_data):
        """
        Build one task of a package, optionally build following task depends

        fn: recipe filename, item: the package name, task: task name
        without the "do_" prefix, the_data: the fully expanded datastore
        for the recipe.

        Fires PkgStarted before execution and PkgSucceeded/PkgFailed
        afterwards.  Returns True on success; on failure the original
        exception is re-raised after the PkgFailed event has fired.
        With --dry-run the task itself is skipped but the success events
        still fire.
        """
        bb.event.fire(bb.event.PkgStarted(item, the_data))
        try:
            if not self.configuration.dry_run:
                bb.build.exec_task('do_%s' % task, the_data)
            bb.event.fire(bb.event.PkgSucceeded(item, the_data))
            return True
        except bb.build.FuncFailed:
            bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
            bb.event.fire(bb.event.PkgFailed(item, the_data))
            raise
        except bb.build.EventException, e:
            # The offending event travels in the exception arguments
            event = e.args[1]
            bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
            bb.event.fire(bb.event.PkgFailed(item, the_data))
            raise
| 197 | |||
| 198 | def tryBuild(self, fn): | ||
| 199 | """ | ||
| 200 | Build a provider and its dependencies. | ||
| 201 | build_depends is a list of previous build dependencies (not runtime) | ||
| 202 | If build_depends is empty, we're dealing with a runtime depends | ||
| 203 | """ | ||
| 204 | |||
| 205 | the_data = self.bb_cache.loadDataFull(fn, self.configuration.data) | ||
| 206 | |||
| 207 | item = self.status.pkg_fn[fn] | ||
| 208 | |||
| 209 | #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data): | ||
| 210 | # return True | ||
| 211 | |||
| 212 | return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data) | ||
| 213 | |||
| 214 | def showVersions(self): | ||
| 215 | |||
| 216 | # Need files parsed | ||
| 217 | self.updateCache() | ||
| 218 | |||
| 219 | pkg_pn = self.status.pkg_pn | ||
| 220 | preferred_versions = {} | ||
| 221 | latest_versions = {} | ||
| 222 | |||
| 223 | # Sort by priority | ||
| 224 | for pn in pkg_pn.keys(): | ||
| 225 | (last_ver,last_file,pref_ver,pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status) | ||
| 226 | preferred_versions[pn] = (pref_ver, pref_file) | ||
| 227 | latest_versions[pn] = (last_ver, last_file) | ||
| 228 | |||
| 229 | pkg_list = pkg_pn.keys() | ||
| 230 | pkg_list.sort() | ||
| 231 | |||
| 232 | bb.msg.plain("%-35s %25s %25s" % ("Package Name", "Latest Version", "Preferred Version")) | ||
| 233 | bb.msg.plain("%-35s %25s %25s\n" % ("============", "==============", "=================")) | ||
| 234 | |||
| 235 | for p in pkg_list: | ||
| 236 | pref = preferred_versions[p] | ||
| 237 | latest = latest_versions[p] | ||
| 238 | |||
| 239 | prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2] | ||
| 240 | lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2] | ||
| 241 | |||
| 242 | if pref == latest: | ||
| 243 | prefstr = "" | ||
| 244 | |||
| 245 | bb.msg.plain("%-35s %25s %25s" % (p, lateststr, prefstr)) | ||
| 246 | |||
    def showEnvironment(self, buildfile = None, pkgs_to_build = []):
        """
        Show the outer or per-package environment

        buildfile: optional .bb file (or regex) whose expanded data
        should be printed; pkgs_to_build: optional single-element target
        list resolved through taskdata to a recipe.  With neither, the
        top-level configuration data is printed.

        NOTE(review): the mutable default [] is never mutated here, so it
        is harmless, but a None default would be the safer idiom.
        """
        fn = None
        envdata = None

        if buildfile:
            # Print the environment of one specific recipe file
            self.cb = None
            self.bb_cache = bb.cache.init(self)
            fn = self.matchFile(buildfile)
        elif len(pkgs_to_build) == 1:
            # Resolve a single named target to its recipe file
            self.updateCache()

            localdata = data.createCopy(self.configuration.data)
            bb.data.update_data(localdata)
            bb.data.expandKeys(localdata)

            taskdata = bb.taskdata.TaskData(self.configuration.abort)
            taskdata.add_provider(localdata, self.status, pkgs_to_build[0])
            taskdata.add_unresolved(localdata, self.status)

            targetid = taskdata.getbuild_id(pkgs_to_build[0])
            fnid = taskdata.build_targets[targetid][0]
            fn = taskdata.fn_index[fnid]
        else:
            # No target: show the global configuration environment
            envdata = self.configuration.data

        if fn:
            try:
                envdata = self.bb_cache.loadDataFull(fn, self.configuration.data)
            except IOError, e:
                bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e))
                raise
            except Exception, e:
                bb.msg.error(bb.msg.domain.Parsing, "%s" % e)
                raise

        # Collects emit_env() output in memory so it can be sent
        # through bb.msg.plain in a single message
        class dummywrite:
            def __init__(self):
                self.writebuf = ""
            def write(self, output):
                self.writebuf = self.writebuf + output

        # emit variables and shell functions
        try:
            data.update_data(envdata)
            wb = dummywrite()
            data.emit_env(wb, envdata, True)
            bb.msg.plain(wb.writebuf)
        except Exception, e:
            bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
        # emit the metadata which isnt valid shell
        data.expandKeys(envdata)
        for e in envdata.keys():
            if data.getVarFlag( e, 'python', envdata ):
                bb.msg.plain("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
| 304 | |||
    def generateDepTreeData(self, pkgs_to_build):
        """
        Create a dependency tree of pkgs_to_build, returning the data.

        Prepares (but does not execute) a runqueue for the targets and
        walks its tasks, producing a dict with keys:
          "pn"           : per-recipe filename/version info
          "depends"      : build-time dependency names per PN
          "tdepends"     : task-level edges ("pn.task" -> "pn.task")
          "rdepends-pn"  : runtime dependency names per PN
          "packages"     : per-package pn/filename/version info
          "rdepends-pkg" : runtime depends per package
          "rrecs-pkg"    : runtime recommends per package
        """

        # Need files parsed
        self.updateCache()

        pkgs_to_build = self.checkPackages(pkgs_to_build)

        localdata = data.createCopy(self.configuration.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)
        taskdata = bb.taskdata.TaskData(self.configuration.abort)

        runlist = []
        for k in pkgs_to_build:
            taskdata.add_provider(localdata, self.status, k)
            runlist.append([k, "do_%s" % self.configuration.cmd])
        taskdata.add_unresolved(localdata, self.status)

        rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
        rq.prepare_runqueue()

        seen_fnids = []
        depend_tree = {}
        depend_tree["depends"] = {}
        depend_tree["tdepends"] = {}
        depend_tree["pn"] = {}
        depend_tree["rdepends-pn"] = {}
        depend_tree["packages"] = {}
        depend_tree["rdepends-pkg"] = {}
        depend_tree["rrecs-pkg"] = {}

        for task in range(len(rq.runq_fnid)):
            taskname = rq.runq_task[task]
            fnid = rq.runq_fnid[task]
            fn = taskdata.fn_index[fnid]
            pn = self.status.pkg_fn[fn]
            # pkg_pepvpr holds the (pe, pv, pr) tuple for the recipe
            version  = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
            if pn not in depend_tree["pn"]:
                depend_tree["pn"][pn] = {}
                depend_tree["pn"][pn]["filename"] = fn
                depend_tree["pn"][pn]["version"] = version
            # Record the task-level dependency edges for this task
            for dep in rq.runq_depends[task]:
                depfn = taskdata.fn_index[rq.runq_fnid[dep]]
                deppn = self.status.pkg_fn[depfn]
                dotname = "%s.%s" % (pn, rq.runq_task[task])
                if not dotname in depend_tree["tdepends"]:
                    depend_tree["tdepends"][dotname] = []
                depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.runq_task[dep]))
            # Per-recipe data only needs collecting once per file
            if fnid not in seen_fnids:
                seen_fnids.append(fnid)
                packages = []

                depend_tree["depends"][pn] = []
                for dep in taskdata.depids[fnid]:
                    depend_tree["depends"][pn].append(taskdata.build_names_index[dep])

                depend_tree["rdepends-pn"][pn] = []
                for rdep in taskdata.rdepids[fnid]:
                    depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])

                rdepends = self.status.rundeps[fn]
                for package in rdepends:
                    depend_tree["rdepends-pkg"][package] = []
                    for rdepend in rdepends[package]:
                        depend_tree["rdepends-pkg"][package].append(rdepend)
                    packages.append(package)

                rrecs = self.status.runrecs[fn]
                for package in rrecs:
                    depend_tree["rrecs-pkg"][package] = []
                    for rdepend in rrecs[package]:
                        depend_tree["rrecs-pkg"][package].append(rdepend)
                    if not package in packages:
                        packages.append(package)

                for package in packages:
                    if package not in depend_tree["packages"]:
                        depend_tree["packages"][package] = {}
                        depend_tree["packages"][package]["pn"] = pn
                        depend_tree["packages"][package]["filename"] = fn
                        depend_tree["packages"][package]["version"] = version

        return depend_tree
| 391 | |||
| 392 | |||
| 393 | def generateDepTreeEvent(self, pkgs_to_build): | ||
| 394 | """ | ||
| 395 | Create a task dependency graph of pkgs_to_build. | ||
| 396 | Generate an event with the result | ||
| 397 | """ | ||
| 398 | depgraph = self.generateDepTreeData(pkgs_to_build) | ||
| 399 | bb.event.fire(bb.event.DepTreeGenerated(self.configuration.data, depgraph)) | ||
| 400 | |||
    def generateDotGraphFiles(self, pkgs_to_build):
        """
        Create a task dependency graph of pkgs_to_build.
        Save the result to a set of .dot files.

        Writes three GraphViz files into the current directory:
        pn-depends.dot (per-recipe), package-depends.dot (per-package)
        and task-depends.dot (per-task).
        """

        depgraph = self.generateDepTreeData(pkgs_to_build)

        # Prints a flattened form of package-depends below where subpackages of a package are merged into the main pn
        depends_file = file('pn-depends.dot', 'w' )
        print >> depends_file, "digraph depends {"
        for pn in depgraph["pn"]:
            fn = depgraph["pn"][pn]["filename"]
            version = depgraph["pn"][pn]["version"]
            print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
        # Solid edges: build-time depends; dashed edges: runtime depends
        for pn in depgraph["depends"]:
            for depend in depgraph["depends"][pn]:
                print >> depends_file, '"%s" -> "%s"' % (pn, depend)
        for pn in depgraph["rdepends-pn"]:
            for rdepend in depgraph["rdepends-pn"][pn]:
                print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, rdepend)
        print >> depends_file, "}"
        bb.msg.plain("PN dependencies saved to 'pn-depends.dot'")

        depends_file = file('package-depends.dot', 'w' )
        print >> depends_file, "digraph depends {"
        for package in depgraph["packages"]:
            pn = depgraph["packages"][package]["pn"]
            fn = depgraph["packages"][package]["filename"]
            version = depgraph["packages"][package]["version"]
            # Label the main package plainly; subpackages show their PN too
            if package == pn:
                print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
            else:
                print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn)
            for depend in depgraph["depends"][pn]:
                print >> depends_file, '"%s" -> "%s"' % (package, depend)
        # Dashed edges for runtime depends and recommends
        for package in depgraph["rdepends-pkg"]:
            for rdepend in depgraph["rdepends-pkg"][package]:
                print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
        for package in depgraph["rrecs-pkg"]:
            for rdepend in depgraph["rrecs-pkg"][package]:
                print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
        print >> depends_file, "}"
        bb.msg.plain("Package dependencies saved to 'package-depends.dot'")

        tdepends_file = file('task-depends.dot', 'w' )
        print >> tdepends_file, "digraph depends {"
        for task in depgraph["tdepends"]:
            # Task keys have the form "pn.taskname"
            (pn, taskname) = task.rsplit(".", 1)
            fn = depgraph["pn"][pn]["filename"]
            version = depgraph["pn"][pn]["version"]
            print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn)
            for dep in depgraph["tdepends"][task]:
                print >> tdepends_file, '"%s" -> "%s"' % (task, dep)
        print >> tdepends_file, "}"
        bb.msg.plain("Task dependencies saved to 'task-depends.dot'")
| 457 | |||
| 458 | def buildDepgraph( self ): | ||
| 459 | all_depends = self.status.all_depends | ||
| 460 | pn_provides = self.status.pn_provides | ||
| 461 | |||
| 462 | localdata = data.createCopy(self.configuration.data) | ||
| 463 | bb.data.update_data(localdata) | ||
| 464 | bb.data.expandKeys(localdata) | ||
| 465 | |||
| 466 | def calc_bbfile_priority(filename): | ||
| 467 | for (regex, pri) in self.status.bbfile_config_priorities: | ||
| 468 | if regex.match(filename): | ||
| 469 | return pri | ||
| 470 | return 0 | ||
| 471 | |||
| 472 | # Handle PREFERRED_PROVIDERS | ||
| 473 | for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split(): | ||
| 474 | try: | ||
| 475 | (providee, provider) = p.split(':') | ||
| 476 | except: | ||
| 477 | bb.msg.fatal(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p) | ||
| 478 | continue | ||
| 479 | if providee in self.status.preferred and self.status.preferred[providee] != provider: | ||
| 480 | bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee])) | ||
| 481 | self.status.preferred[providee] = provider | ||
| 482 | |||
| 483 | # Calculate priorities for each file | ||
| 484 | for p in self.status.pkg_fn.keys(): | ||
| 485 | self.status.bbfile_priority[p] = calc_bbfile_priority(p) | ||
| 486 | |||
    def buildWorldTargetList(self):
        """
        Build package list for "bitbake world"

        A recipe is added to self.status.world_target unless one of its
        provides starts with 'virtual/' or some other recipe provides the
        same thing, in which case it is considered non-terminal and
        skipped.  The possible_world/all_depends caches are dropped
        afterwards to free memory.
        """
        # NOTE(review): these two locals are never read in this method;
        # all_depends is only cleared via self.status below.
        all_depends = self.status.all_depends
        pn_provides = self.status.pn_provides
        bb.msg.debug(1, bb.msg.domain.Parsing, "collating packages for \"world\"")
        for f in self.status.possible_world:
            terminal = True
            pn = self.status.pkg_fn[f]

            for p in pn_provides[pn]:
                if p.startswith('virtual/'):
                    bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to %s provider starting with virtual/" % (f, p))
                    terminal = False
                    break
                # NOTE(review): this inner break only leaves the providers
                # loop; the outer loop keeps checking remaining provides
                # even after terminal is already False.
                for pf in self.status.providers[p]:
                    if self.status.pkg_fn[pf] != pn:
                        bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to both us and %s providing %s" % (f, pf, p))
                        terminal = False
                        break
            if terminal:
                self.status.world_target.add(pn)

        # drop reference count now
        self.status.possible_world = None
        self.status.all_depends = None
| 514 | |||
| 515 | def interactiveMode( self ): | ||
| 516 | """Drop off into a shell""" | ||
| 517 | try: | ||
| 518 | from bb import shell | ||
| 519 | except ImportError, details: | ||
| 520 | bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details ) | ||
| 521 | else: | ||
| 522 | shell.start( self ) | ||
| 523 | |||
| 524 | def parseConfigurationFile( self, afile ): | ||
| 525 | try: | ||
| 526 | self.configuration.data = bb.parse.handle( afile, self.configuration.data ) | ||
| 527 | |||
| 528 | # Handle any INHERITs and inherit the base class | ||
| 529 | inherits = ["base"] + (bb.data.getVar('INHERIT', self.configuration.data, True ) or "").split() | ||
| 530 | for inherit in inherits: | ||
| 531 | self.configuration.data = bb.parse.handle(os.path.join('classes', '%s.bbclass' % inherit), self.configuration.data, True ) | ||
| 532 | |||
| 533 | # Nomally we only register event handlers at the end of parsing .bb files | ||
| 534 | # We register any handlers we've found so far here... | ||
| 535 | for var in data.getVar('__BBHANDLERS', self.configuration.data) or []: | ||
| 536 | bb.event.register(var,bb.data.getVar(var, self.configuration.data)) | ||
| 537 | |||
| 538 | bb.fetch.fetcher_init(self.configuration.data) | ||
| 539 | |||
| 540 | bb.event.fire(bb.event.ConfigParsed(self.configuration.data)) | ||
| 541 | |||
| 542 | except IOError, e: | ||
| 543 | bb.msg.fatal(bb.msg.domain.Parsing, "Error when parsing %s: %s" % (afile, str(e))) | ||
| 544 | except IOError: | ||
| 545 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to open %s" % afile ) | ||
| 546 | except bb.parse.ParseError, details: | ||
| 547 | bb.msg.fatal(bb.msg.domain.Parsing, "Unable to parse %s (%s)" % (afile, details) ) | ||
| 548 | |||
| 549 | def handleCollections( self, collections ): | ||
| 550 | """Handle collections""" | ||
| 551 | if collections: | ||
| 552 | collection_list = collections.split() | ||
| 553 | for c in collection_list: | ||
| 554 | regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1) | ||
| 555 | if regex == None: | ||
| 556 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s not defined" % c) | ||
| 557 | continue | ||
| 558 | priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1) | ||
| 559 | if priority == None: | ||
| 560 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PRIORITY_%s not defined" % c) | ||
| 561 | continue | ||
| 562 | try: | ||
| 563 | cre = re.compile(regex) | ||
| 564 | except re.error: | ||
| 565 | bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex)) | ||
| 566 | continue | ||
| 567 | try: | ||
| 568 | pri = int(priority) | ||
| 569 | self.status.bbfile_config_priorities.append((cre, pri)) | ||
| 570 | except ValueError: | ||
| 571 | bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority)) | ||
| 572 | |||
| 573 | def buildSetVars(self): | ||
| 574 | """ | ||
| 575 | Setup any variables needed before starting a build | ||
| 576 | """ | ||
| 577 | if not bb.data.getVar("BUILDNAME", self.configuration.data): | ||
| 578 | bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data) | ||
| 579 | bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()), self.configuration.data) | ||
| 580 | |||
| 581 | def matchFiles(self, buildfile): | ||
| 582 | """ | ||
| 583 | Find the .bb files which match the expression in 'buildfile'. | ||
| 584 | """ | ||
| 585 | |||
| 586 | bf = os.path.abspath(buildfile) | ||
| 587 | try: | ||
| 588 | os.stat(bf) | ||
| 589 | return [bf] | ||
| 590 | except OSError: | ||
| 591 | (filelist, masked) = self.collect_bbfiles() | ||
| 592 | regexp = re.compile(buildfile) | ||
| 593 | matches = [] | ||
| 594 | for f in filelist: | ||
| 595 | if regexp.search(f) and os.path.isfile(f): | ||
| 596 | bf = f | ||
| 597 | matches.append(f) | ||
| 598 | return matches | ||
| 599 | |||
| 600 | def matchFile(self, buildfile): | ||
| 601 | """ | ||
| 602 | Find the .bb file which matches the expression in 'buildfile'. | ||
| 603 | Raise an error if multiple files | ||
| 604 | """ | ||
| 605 | matches = self.matchFiles(buildfile) | ||
| 606 | if len(matches) != 1: | ||
| 607 | bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches))) | ||
| 608 | for f in matches: | ||
| 609 | bb.msg.error(bb.msg.domain.Parsing, " %s" % f) | ||
| 610 | raise MultipleMatches | ||
| 611 | return matches[0] | ||
| 612 | |||
| 613 | def buildFile(self, buildfile, task): | ||
| 614 | """ | ||
| 615 | Build the file matching regexp buildfile | ||
| 616 | """ | ||
| 617 | |||
| 618 | fn = self.matchFile(buildfile) | ||
| 619 | self.buildSetVars() | ||
| 620 | |||
| 621 | # Load data into the cache for fn | ||
| 622 | self.bb_cache = bb.cache.init(self) | ||
| 623 | self.bb_cache.loadData(fn, self.configuration.data) | ||
| 624 | |||
| 625 | # Parse the loaded cache data | ||
| 626 | self.status = bb.cache.CacheData() | ||
| 627 | self.bb_cache.handle_data(fn, self.status) | ||
| 628 | |||
| 629 | # Tweak some variables | ||
| 630 | item = self.bb_cache.getVar('PN', fn, True) | ||
| 631 | self.status.ignored_dependencies = Set() | ||
| 632 | self.status.bbfile_priority[fn] = 1 | ||
| 633 | |||
| 634 | # Remove external dependencies | ||
| 635 | self.status.task_deps[fn]['depends'] = {} | ||
| 636 | self.status.deps[fn] = [] | ||
| 637 | self.status.rundeps[fn] = [] | ||
| 638 | self.status.runrecs[fn] = [] | ||
| 639 | |||
| 640 | # Remove stamp for target if force mode active | ||
| 641 | if self.configuration.force: | ||
| 642 | bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (task, fn)) | ||
| 643 | bb.build.del_stamp('do_%s' % task, self.status, fn) | ||
| 644 | |||
| 645 | # Setup taskdata structure | ||
| 646 | taskdata = bb.taskdata.TaskData(self.configuration.abort) | ||
| 647 | taskdata.add_provider(self.configuration.data, self.status, item) | ||
| 648 | |||
| 649 | buildname = bb.data.getVar("BUILDNAME", self.configuration.data) | ||
| 650 | bb.event.fire(bb.event.BuildStarted(buildname, [item], self.configuration.event_data)) | ||
| 651 | |||
| 652 | # Execute the runqueue | ||
| 653 | runlist = [[item, "do_%s" % self.configuration.cmd]] | ||
| 654 | |||
| 655 | rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist) | ||
| 656 | |||
| 657 | def buildFileIdle(server, rq, abort): | ||
| 658 | |||
| 659 | if abort or self.cookerAction == cookerStop: | ||
| 660 | rq.finish_runqueue(True) | ||
| 661 | elif self.cookerAction == cookerShutdown: | ||
| 662 | rq.finish_runqueue(False) | ||
| 663 | failures = 0 | ||
| 664 | try: | ||
| 665 | retval = rq.execute_runqueue() | ||
| 666 | except runqueue.TaskFailure, fnids: | ||
| 667 | for fnid in fnids: | ||
| 668 | bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid]) | ||
| 669 | failures = failures + 1 | ||
| 670 | retval = False | ||
| 671 | if not retval: | ||
| 672 | self.cookerIdle = True | ||
| 673 | self.command.finishOfflineCommand() | ||
| 674 | bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures)) | ||
| 675 | return retval | ||
| 676 | |||
| 677 | self.cookerIdle = False | ||
| 678 | self.server.register_idle_function(buildFileIdle, rq) | ||
| 679 | |||
    def buildTargets(self, targets):
        """
        Attempt to build the targets specified

        Resolves the targets (expanding 'world'), fires BuildStarted,
        builds a taskdata/runqueue for them and registers an idle
        callback that drives the runqueue and fires BuildCompleted when
        it finishes.
        """

        # Need files parsed
        self.updateCache()

        targets = self.checkPackages(targets)

        # Closure driven from the server idle loop; 'taskdata' and
        # 'buildname' below are bound before this can ever run because
        # registration happens at the end of this method.
        def buildTargetsIdle(server, rq, abort):

            if abort or self.cookerAction == cookerStop:
                rq.finish_runqueue(True)
            elif self.cookerAction == cookerShutdown:
                rq.finish_runqueue(False)
            failures = 0
            try:
                retval = rq.execute_runqueue()
            except runqueue.TaskFailure, fnids:
                for fnid in fnids:
                    bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
                    failures = failures + 1
                retval = False
            if not retval:
                # Runqueue finished: mark the cooker idle and report
                self.cookerIdle = True
                self.command.finishOfflineCommand()
                bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
            return retval

        self.buildSetVars()

        buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
        bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))

        localdata = data.createCopy(self.configuration.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)

        taskdata = bb.taskdata.TaskData(self.configuration.abort)

        runlist = []
        for k in targets:
            taskdata.add_provider(localdata, self.status, k)
            runlist.append([k, "do_%s" % self.configuration.cmd])
        taskdata.add_unresolved(localdata, self.status)

        rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)

        self.cookerIdle = False
        self.server.register_idle_function(buildTargetsIdle, rq)
| 731 | |||
    def updateCache(self):
        """
        Parse all recipe files into self.status, if not already done.

        Collects and parses every .bb file, computes provider preferences
        and file priorities, and moves the cooker state from cookerClean
        to cookerParsed.  Subsequent calls return immediately.
        """
        if self.cookerState == cookerParsed:
            return

        # Import Psyco if available and not disabled
        import platform
        if platform.machine() in ['i386', 'i486', 'i586', 'i686']:
            if not self.configuration.disable_psyco:
                try:
                    import psyco
                except ImportError:
                    bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
                else:
                    # JIT-compile the parsing hot path
                    psyco.bind( self.parse_bbfiles )
            else:
                bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")

        self.status = bb.cache.CacheData()

        # Dependencies listed in ASSUME_PROVIDED (plus any extras from
        # the command line) are never resolved to recipes
        ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
        self.status.ignored_dependencies = Set(ignore.split())

        for dep in self.configuration.extra_assume_provided:
            self.status.ignored_dependencies.add(dep)

        self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )

        bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
        (filelist, masked) = self.collect_bbfiles()
        self.parse_bbfiles(filelist, masked)
        bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")

        self.buildDepgraph()

        self.cookerState = cookerParsed
| 768 | |||
| 769 | def checkPackages(self, pkgs_to_build): | ||
| 770 | |||
| 771 | if len(pkgs_to_build) == 0: | ||
| 772 | raise NothingToBuild | ||
| 773 | |||
| 774 | if 'world' in pkgs_to_build: | ||
| 775 | self.buildWorldTargetList() | ||
| 776 | pkgs_to_build.remove('world') | ||
| 777 | for t in self.status.world_target: | ||
| 778 | pkgs_to_build.append(t) | ||
| 779 | |||
| 780 | return pkgs_to_build | ||
| 781 | |||
| 782 | def get_bbfiles( self, path = os.getcwd() ): | ||
| 783 | """Get list of default .bb files by reading out the current directory""" | ||
| 784 | contents = os.listdir(path) | ||
| 785 | bbfiles = [] | ||
| 786 | for f in contents: | ||
| 787 | (root, ext) = os.path.splitext(f) | ||
| 788 | if ext == ".bb": | ||
| 789 | bbfiles.append(os.path.abspath(os.path.join(os.getcwd(),f))) | ||
| 790 | return bbfiles | ||
| 791 | |||
| 792 | def find_bbfiles( self, path ): | ||
| 793 | """Find all the .bb files in a directory""" | ||
| 794 | from os.path import join | ||
| 795 | |||
| 796 | found = [] | ||
| 797 | for dir, dirs, files in os.walk(path): | ||
| 798 | for ignored in ('SCCS', 'CVS', '.svn'): | ||
| 799 | if ignored in dirs: | ||
| 800 | dirs.remove(ignored) | ||
| 801 | found += [join(dir,f) for f in files if f.endswith('.bb')] | ||
| 802 | |||
| 803 | return found | ||
| 804 | |||
| 805 | def collect_bbfiles( self ): | ||
| 806 | """Collect all available .bb build files""" | ||
| 807 | parsed, cached, skipped, masked = 0, 0, 0, 0 | ||
| 808 | self.bb_cache = bb.cache.init(self) | ||
| 809 | |||
| 810 | files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split() | ||
| 811 | data.setVar("BBFILES", " ".join(files), self.configuration.data) | ||
| 812 | |||
| 813 | if not len(files): | ||
| 814 | files = self.get_bbfiles() | ||
| 815 | |||
| 816 | if not len(files): | ||
| 817 | bb.msg.error(bb.msg.domain.Collection, "no files to build.") | ||
| 818 | |||
| 819 | newfiles = [] | ||
| 820 | for f in files: | ||
| 821 | if os.path.isdir(f): | ||
| 822 | dirfiles = self.find_bbfiles(f) | ||
| 823 | if dirfiles: | ||
| 824 | newfiles += dirfiles | ||
| 825 | continue | ||
| 826 | newfiles += glob.glob(f) or [ f ] | ||
| 827 | |||
| 828 | bbmask = bb.data.getVar('BBMASK', self.configuration.data, 1) | ||
| 829 | |||
| 830 | if not bbmask: | ||
| 831 | return (newfiles, 0) | ||
| 832 | |||
| 833 | try: | ||
| 834 | bbmask_compiled = re.compile(bbmask) | ||
| 835 | except sre_constants.error: | ||
| 836 | bb.msg.fatal(bb.msg.domain.Collection, "BBMASK is not a valid regular expression.") | ||
| 837 | |||
| 838 | finalfiles = [] | ||
| 839 | for i in xrange( len( newfiles ) ): | ||
| 840 | f = newfiles[i] | ||
| 841 | if bbmask and bbmask_compiled.search(f): | ||
| 842 | bb.msg.debug(1, bb.msg.domain.Collection, "skipping masked file %s" % f) | ||
| 843 | masked += 1 | ||
| 844 | continue | ||
| 845 | finalfiles.append(f) | ||
| 846 | |||
| 847 | return (finalfiles, masked) | ||
| 848 | |||
    def parse_bbfiles(self, filelist, masked):
        """Parse every recipe in *filelist*, reporting progress as we go.

        *masked* is the count of files already suppressed by BBMASK; it is
        passed through unchanged into the ParseProgress events.  A
        ParseProgress event fires after every file regardless of outcome.
        Raises ParsingErrorsFound if any file failed to parse.
        """
        parsed, cached, skipped, error, total = 0, 0, 0, 0, len(filelist)
        for i in xrange(total):
            f = filelist[i]

            #bb.msg.debug(1, bb.msg.domain.Collection, "parsing %s" % f)

            # read a file's metadata
            try:
                fromCache, skip = self.bb_cache.loadData(f, self.configuration.data)
                if skip:
                    skipped += 1
                    bb.msg.debug(2, bb.msg.domain.Collection, "skipping %s" % f)
                    self.bb_cache.skip(f)
                    continue
                elif fromCache: cached += 1
                else: parsed += 1
                deps = None

                # Disabled by RP as was no longer functional
                # allow metadata files to add items to BBFILES
                #data.update_data(self.pkgdata[f])
                #addbbfiles = self.bb_cache.getVar('BBFILES', f, False) or None
                #if addbbfiles:
                #    for aof in addbbfiles.split():
                #        if not files.count(aof):
                #            if not os.path.isabs(aof):
                #                aof = os.path.join(os.path.dirname(f),aof)
                #            files.append(aof)

                self.bb_cache.handle_data(f, self.status)

            except IOError, e:
                # Unreadable file: drop it from the cache and carry on.
                error += 1
                self.bb_cache.remove(f)
                bb.msg.error(bb.msg.domain.Collection, "opening %s: %s" % (f, e))
                pass
            except KeyboardInterrupt:
                # Flush the parse cache to disk before propagating Ctrl-C.
                self.bb_cache.sync()
                raise
            except Exception, e:
                error += 1
                self.bb_cache.remove(f)
                bb.msg.error(bb.msg.domain.Collection, "%s while parsing %s" % (e, f))
            except:
                # Python 2 can still raise old-style/string exceptions that
                # do not inherit Exception; drop the cache entry and re-raise.
                self.bb_cache.remove(f)
                raise
            finally:
                # Progress is reported after every file - parsed, cached,
                # skipped or errored alike.
                bb.event.fire(bb.event.ParseProgress(self.configuration.event_data, cached, parsed, skipped, masked, error, total))

        self.bb_cache.sync()
        if error > 0:
            raise ParsingErrorsFound
| 902 | |||
    def serve(self):
        """Run the cooker server main loop, optionally under a profiler.

        With configuration.profile set, the server loop runs under
        cProfile (falling back to the pure-Python profiler) and a pstats
        report is written to 'profile.log.processed'; otherwise the loop
        runs directly.  Fires CookerExit once the loop returns.
        """
        if self.configuration.profile:
            try:
                import cProfile as profile
            except:
                import profile

            profile.runctx("self.server.serve_forever()", globals(), locals(), "profile.log")

            # Redirect stdout to capture profile information
            pout = open('profile.log.processed', 'w')
            so = sys.stdout.fileno()
            os.dup2(pout.fileno(), so)

            import pstats
            p = pstats.Stats('profile.log')
            p.sort_stats('time')
            p.print_stats()
            p.print_callers()
            p.sort_stats('cumulative')
            p.print_stats()

            # NOTE(review): 'so' is the stdout fd NUMBER, which the dup2
            # above already overwrote, so this does not restore the original
            # stdout - after this point stdout appears to remain pointed at
            # the report file descriptor.  Verify before relying on output
            # past this method in profile mode.
            os.dup2(so, pout.fileno())
            pout.flush()
            pout.close()
        else:
            self.server.serve_forever()

        bb.event.fire(CookerExit(self.configuration.event_data))
| 933 | |||
class CookerExit(bb.event.Event):
    """
    Notify clients of the Cooker shutdown
    """

    def __init__(self, d):
        # Pure marker event: carries only the standard event data 'd',
        # no payload of its own.
        bb.event.Event.__init__(self, d)
| 941 | |||
diff --git a/bitbake-dev/lib/bb/daemonize.py b/bitbake-dev/lib/bb/daemonize.py new file mode 100644 index 0000000000..6023c9ccd2 --- /dev/null +++ b/bitbake-dev/lib/bb/daemonize.py | |||
| @@ -0,0 +1,189 @@ | |||
| 1 | """ | ||
| 2 | Python Daemonizing helper | ||
| 3 | |||
| 4 | Configurable daemon behaviors: | ||
| 5 | |||
| 6 | 1.) The current working directory set to the "/" directory. | ||
| 7 | 2.) The current file creation mode mask set to 0. | ||
| 8 | 3.) Close all open files (1024). | ||
| 9 | 4.) Redirect standard I/O streams to "/dev/null". | ||
| 10 | |||
| 11 | A failed call to fork() now raises an exception. | ||
| 12 | |||
| 13 | References: | ||
| 14 | 1) Advanced Programming in the Unix Environment: W. Richard Stevens | ||
| 15 | 2) Unix Programming Frequently Asked Questions: | ||
| 16 | http://www.erlenstar.demon.co.uk/unix/faq_toc.html | ||
| 17 | |||
| 18 | Modified to allow a function to be daemonized and return for | ||
| 19 | bitbake use by Richard Purdie | ||
| 20 | """ | ||
| 21 | |||
| 22 | __author__ = "Chad J. Schroeder" | ||
| 23 | __copyright__ = "Copyright (C) 2005 Chad J. Schroeder" | ||
| 24 | __version__ = "0.2" | ||
| 25 | |||
# Standard Python modules.
import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
# 0 means no permission bits are masked off files the daemon creates.
UMASK = 0

# Default maximum for the number of available file descriptors.
# Only used as a fallback when RLIMIT_NOFILE reports no limit.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
# os.devnull was added in Python 2.4; fall back to the literal path on
# older interpreters.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"
| 42 | |||
def createDaemon(function, logfile):
    """
    Detach a process from the controlling terminal and run it in the
    background as a daemon, returning control to the caller.

    function -- callable executed inside the daemonized (grand)child;
                createDaemon never returns in that process (os._exit(0)
                follows the call).
    logfile  -- path opened for writing and installed as the daemon's
                stdout and stderr; stdin is attached to /dev/null.

    In the original parent, createDaemon simply returns after the first
    fork succeeds.
    """

    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to ensure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)

    if (pid == 0): # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        # 1) When the process that dies is the session leader of a session that
        # is attached to a terminal device, SIGHUP is sent to all processes
        # in the foreground process group of that terminal device.
        # 2) When the death of a process causes a process group to become
        # orphaned, and one or more processes in the orphaned group are
        # stopped, then SIGHUP and SIGCONT are sent to all members of the
        # orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork() # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)

        if (pid == 0): # The second child.
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            os.umask(UMASK)
        else:
            # Parent (the first child) of the second child.
            os._exit(0)
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        return

    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exists, use
    # the default value (configurable).
    #
    # try:
    #    maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #    maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #    maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #    maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is not limit on the
    # resource, use the default value.
    #
    import resource # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    # NOTE(review): this loop is commented out, so inherited descriptors are
    # in fact left open in this (bitbake-modified) version.
    # for fd in range(0, maxfd):
    #    try:
    #        os.close(fd)
    #    except OSError: # ERROR, fd wasn't open to begin with (ignored)
    #        pass

    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    # NOTE(review): the comment above only holds if the close loop runs,
    # which it currently does not; the os.open call is commented out too.
    # os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)

    # Duplicate standard input to standard output and standard error.
    # os.dup2(0, 1) # standard output (1)
    # os.dup2(0, 2) # standard error (2)


    # Attach stdin to /dev/null and send both stdout and stderr to logfile.
    si = file('/dev/null', 'r')
    so = file(logfile, 'w')
    se = so


    # Replace those fds with our own
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # Run the daemon body; never return from this process.
    function()

    os._exit(0)
| 189 | |||
diff --git a/bitbake-dev/lib/bb/data.py b/bitbake-dev/lib/bb/data.py new file mode 100644 index 0000000000..54b2615afb --- /dev/null +++ b/bitbake-dev/lib/bb/data.py | |||
| @@ -0,0 +1,570 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Data' implementations | ||
| 5 | |||
| 6 | Functions for interacting with the data structure used by the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | The expandData and update_data are the most expensive | ||
| 10 | operations. At night the cookie monster came by and | ||
| 11 | suggested 'give me cookies on setting the variables and | ||
| 12 | things will work out'. Taking this suggestion into account | ||
| 13 | applying the skills from the not yet passed 'Entwurf und | ||
| 14 | Analyse von Algorithmen' lecture and the cookie | ||
| 15 | monster seems to be right. We will track setVar more carefully | ||
| 16 | to have faster update_data and expandKeys operations. | ||
| 17 | |||
| 18 | This is a trade-off between speed and memory again but | ||
| 19 | the speed is more critical here. | ||
| 20 | """ | ||
| 21 | |||
| 22 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 23 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
| 24 | # | ||
| 25 | # This program is free software; you can redistribute it and/or modify | ||
| 26 | # it under the terms of the GNU General Public License version 2 as | ||
| 27 | # published by the Free Software Foundation. | ||
| 28 | # | ||
| 29 | # This program is distributed in the hope that it will be useful, | ||
| 30 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 31 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 32 | # GNU General Public License for more details. | ||
| 33 | # | ||
| 34 | # You should have received a copy of the GNU General Public License along | ||
| 35 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 36 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 37 | # | ||
| 38 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 39 | |||
import sys, os, re, time, types
# Make the 'bb' package importable when this module is loaded standalone
# (or via pydoc): push the grandparent directory of the invoked script onto
# the front of sys.path before importing bb below.
if sys.argv[0][-5:] == "pydoc":
    path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
    path = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0,path)

from bb import data_smart
import bb

# The concrete datastore class every init()/init_db() call instantiates.
_dict_type = data_smart.DataSmart
| 51 | |||
def init():
    """Return a fresh, empty datastore."""
    return _dict_type()

def init_db(parent = None):
    """Return a new datastore: a copy-on-write child of *parent* when one
    is given, otherwise a fresh empty store."""
    if parent:
        return parent.createCopy()
    else:
        return _dict_type()

def createCopy(source):
    """Link the source set to the destination
    If one does not find the value in the destination set,
    search will go on to the source set to get the value.
    Value from source are copy-on-write. i.e. any try to
    modify one of them will end up putting the modified value
    in the destination set.
    """
    return source.createCopy()

def initVar(var, d):
    """Non-destructive var init for data structure"""
    d.initVar(var)
| 74 | |||
| 75 | |||
#
# Thin module-level wrappers around the datastore's own methods; kept so
# callers can pass the store 'd' explicitly rather than calling methods
# on it directly.
#
def setVar(var, value, d):
    """Set a variable to a given value

    Example:
        >>> d = init()
        >>> setVar('TEST', 'testcontents', d)
        >>> print getVar('TEST', d)
        testcontents
    """
    d.setVar(var,value)


def getVar(var, d, exp = 0):
    """Gets the value of a variable

    Pass exp=1 to have ${...} references in the value expanded.

    Example:
        >>> d = init()
        >>> setVar('TEST', 'testcontents', d)
        >>> print getVar('TEST', d)
        testcontents
    """
    return d.getVar(var,exp)


def renameVar(key, newkey, d):
    """Renames a variable from key to newkey

    Example:
        >>> d = init()
        >>> setVar('TEST', 'testcontents', d)
        >>> renameVar('TEST', 'TEST2', d)
        >>> print getVar('TEST2', d)
        testcontents
    """
    d.renameVar(key, newkey)

def delVar(var, d):
    """Removes a variable from the data set

    Example:
        >>> d = init()
        >>> setVar('TEST', 'testcontents', d)
        >>> print getVar('TEST', d)
        testcontents
        >>> delVar('TEST', d)
        >>> print getVar('TEST', d)
        None
    """
    d.delVar(var)
| 125 | |||
#
# Flag accessors: flags are named attributes attached to a variable (for
# example 'export', 'func', 'python') stored alongside its value.
#
def setVarFlag(var, flag, flagvalue, d):
    """Set a flag for a given variable to a given value

    Example:
        >>> d = init()
        >>> setVarFlag('TEST', 'python', 1, d)
        >>> print getVarFlag('TEST', 'python', d)
        1
    """
    d.setVarFlag(var,flag,flagvalue)

def getVarFlag(var, flag, d):
    """Gets given flag from given var

    Example:
        >>> d = init()
        >>> setVarFlag('TEST', 'python', 1, d)
        >>> print getVarFlag('TEST', 'python', d)
        1
    """
    return d.getVarFlag(var,flag)

def delVarFlag(var, flag, d):
    """Removes a given flag from the variable's flags

    Example:
        >>> d = init()
        >>> setVarFlag('TEST', 'testflag', 1, d)
        >>> print getVarFlag('TEST', 'testflag', d)
        1
        >>> delVarFlag('TEST', 'testflag', d)
        >>> print getVarFlag('TEST', 'testflag', d)
        None

    """
    d.delVarFlag(var,flag)

def setVarFlags(var, flags, d):
    """Set the flags for a given variable

    Note:
        setVarFlags will not clear previous
        flags. Think of this method as
        addVarFlags

    Example:
        >>> d = init()
        >>> myflags = {}
        >>> myflags['test'] = 'blah'
        >>> setVarFlags('TEST', myflags, d)
        >>> print getVarFlag('TEST', 'test', d)
        blah
    """
    d.setVarFlags(var,flags)

def getVarFlags(var, d):
    """Gets a variable's flags

    Example:
        >>> d = init()
        >>> setVarFlag('TEST', 'test', 'blah', d)
        >>> print getVarFlags('TEST', d)['test']
        blah
    """
    return d.getVarFlags(var)

def delVarFlags(var, d):
    """Removes a variable's flags

    Example:
        >>> data = init()
        >>> setVarFlag('TEST', 'testflag', 1, data)
        >>> print getVarFlag('TEST', 'testflag', data)
        1
        >>> delVarFlags('TEST', data)
        >>> print getVarFlags('TEST', data)
        None

    """
    d.delVarFlags(var)
| 206 | |||
def keys(d):
    """Return a list of keys in d

    Example:
        >>> d = init()
        >>> setVar('TEST', 1, d)
        >>> setVar('MOO' , 2, d)
        >>> setVarFlag('TEST', 'test', 1, d)
        >>> keys(d)
        ['TEST', 'MOO']
    """
    return d.keys()

def getData(d):
    """Returns the data object used"""
    return d

def setData(newData, d):
    """Sets the data object to the supplied value"""
    # NOTE(review): rebinding the local name 'd' cannot affect the caller's
    # reference - as written this function is a no-op outside this scope.
    # Kept only for API compatibility; confirm before relying on it.
    d = newData
| 227 | |||
| 228 | |||
##
## Cookie Monsters' query functions
##
def _get_override_vars(d, override):
    """
    Internal!!!

    Get the Names of Variables that have a specific
    override. This function returns a iterable
    Set or an empty list

    Currently a stub: always returns an empty list.
    """
    return []

def _get_var_flags_triple(d):
    """
    Internal!!!

    Currently a stub: always returns an empty list.
    """
    return []

# Matches a plain ${VAR} reference (no nested braces inside).
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
# Matches an inline python expansion of the form ${@expression}.
__expand_python_regexp__ = re.compile(r"\${@.+?}")
| 251 | |||
def expand(s, d, varname = None):
    """Variable expansion using the data store.

    *varname*, when given, names the variable the string came from (used by
    the datastore's expansion machinery, e.g. for caching/diagnostics).
    Unresolvable references are left as literal ${...} text.

    Example:
        Standard expansion:
        >>> d = init()
        >>> setVar('A', 'sshd', d)
        >>> print expand('/usr/bin/${A}', d)
        /usr/bin/sshd

        Python expansion:
        >>> d = init()
        >>> print expand('result: ${@37 * 72}', d)
        result: 2664

        Shell expansion:
        >>> d = init()
        >>> print expand('${TARGET_MOO}', d)
        ${TARGET_MOO}
        >>> setVar('TARGET_MOO', 'yupp', d)
        >>> print expand('${TARGET_MOO}',d)
        yupp
        >>> setVar('SRC_URI', 'http://somebug.${TARGET_MOO}', d)
        >>> delVar('TARGET_MOO', d)
        >>> print expand('${SRC_URI}', d)
        http://somebug.${TARGET_MOO}
    """
    return d.expand(s, varname)
| 280 | |||
def expandKeys(alterdata, readdata = None):
    """Rename every variable in *alterdata* whose key itself contains a
    ${...} reference to its expanded form, resolving the references
    against *readdata* (defaults to *alterdata* itself)."""
    if readdata == None:
        readdata = alterdata

    # First pass: compute the new name for every expandable key.  All
    # expansions happen before any rename to maximise the usefulness of
    # the expand cache.
    renames = {}
    for key in keys(alterdata):
        if '${' not in key:
            continue
        expanded = expand(key, readdata)
        if expanded != key:
            renames[key] = expanded

    # Second pass: apply the computed renames.
    for oldkey in renames:
        renameVar(oldkey, renames[oldkey], alterdata)
| 301 | |||
def expandData(alterdata, readdata = None):
    """For each variable in alterdata, expand it, and update the var contents.
    Replacements use data from readdata.

    Non-string values are left untouched; a value is only rewritten when
    expansion actually changed it.

    Example:
        >>> a=init()
        >>> b=init()
        >>> setVar("dlmsg", "dl_dir is ${DL_DIR}", a)
        >>> setVar("DL_DIR", "/path/to/whatever", b)
        >>> expandData(a, b)
        >>> print getVar("dlmsg", a)
        dl_dir is /path/to/whatever
    """
    if readdata == None:
        readdata = alterdata

    for key in keys(alterdata):
        val = getVar(key, alterdata)
        # Only plain strings can contain ${...} references; skip the rest.
        if type(val) is not types.StringType:
            continue
        expanded = expand(val, readdata)
        # print "key is %s, val is %s, expanded is %s" % (key, val, expanded)
        if val != expanded:
            setVar(key, expanded, alterdata)
| 326 | |||
| 327 | import os | ||
| 328 | |||
def inheritFromOS(d):
    """Copy the process environment into the datastore *d*.

    Every imported variable is tagged with the 'matchesenv' flag so that
    emit_var() can later skip values that still match the environment.
    """
    # fakeroot needs to be able to set these itself, so never inherit them.
    blocked = ("LD_LIBRARY_PATH", "LD_PRELOAD")
    for name in os.environ.keys():
        if name in blocked:
            continue
        try:
            setVar(name, os.environ[name], d)
            setVarFlag(name, 'matchesenv', '1', d)
        except TypeError:
            # Some environment entries cannot be stored; skip them silently.
            pass
| 340 | |||
| 341 | import sys | ||
| 342 | |||
def emit_var(var, o=sys.__stdout__, d = None, all=False):
    """Emit a single variable in a form sourceable by a shell.

    Writes to the file object *o*; returns 1 if something was emitted,
    0 otherwise.  Variables flagged 'func' are written as shell function
    bodies, 'export' adds an 'export ' prefix, 'unexport' emits 'unset'.
    With all=True every variable qualifies and its unexpanded value is
    included as a comment line.

    Fixes versus the previous revision: the default datastore used to be
    ``d = init()`` - a single shared instance created at import time (the
    classic mutable-default pitfall) - and the result of ``val.rstrip()``
    was discarded, letting whitespace-only values slip past the emptiness
    check and be emitted as VAR="".
    """
    if d is None:
        d = init()

    # Python functions are never shell-sourceable.
    if getVarFlag(var, "python", d):
        return 0

    export = getVarFlag(var, "export", d)
    unexport = getVarFlag(var, "unexport", d)
    func = getVarFlag(var, "func", d)
    if not all and not export and not unexport and not func:
        return 0

    try:
        if all:
            oval = getVar(var, d, 0)
        val = getVar(var, d, 1)
    except KeyboardInterrupt:
        raise
    except:
        excname = str(sys.exc_info()[0])
        if excname == "bb.build.FuncFailed":
            raise
        o.write('# expansion of %s threw %s\n' % (var, excname))
        return 0

    if all:
        # Record the raw (unexpanded) form for reference.
        o.write('# %s=%s\n' % (var, oval))

    if type(val) is not types.StringType:
        return 0

    # Names a shell cannot accept are skipped unless dumping everything.
    if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
        return 0

    varExpanded = expand(var, d)

    if unexport:
        o.write('unset %s\n' % varExpanded)
        return 1

    # Values that still match the inherited environment need no re-emit.
    if getVarFlag(var, 'matchesenv', d):
        return 0

    val = val.rstrip()
    if not val:
        return 0

    if func:
        # NOTE: should probably check for unbalanced {} within the var
        o.write("%s() {\n%s\n}\n" % (varExpanded, val))
        return 1

    if export:
        o.write('export ')

    # if we're going to output this within doublequotes,
    # to a shell, we need to escape the quotes in the var
    alter = re.sub('"', '\\"', val.strip())
    o.write('%s="%s"\n' % (varExpanded, alter))
    return 1
| 402 | |||
| 403 | |||
def emit_env(o=sys.__stdout__, d = None, all=False):
    """Emits all items in the data store in a format such that it can be sourced by a shell.

    Plain variables are written first, then shell functions.  Fixes the
    shared mutable default argument: ``d = init()`` previously created one
    datastore at import time and reused it for every default-argument call.
    """
    if d is None:
        d = init()

    env = keys(d)

    # First pass: everything that is not a shell function.
    for e in env:
        if getVarFlag(e, "func", d):
            continue
        emit_var(e, o, d, all) and o.write('\n')

    # Second pass: only the shell functions.
    for e in env:
        if not getVarFlag(e, "func", d):
            continue
        emit_var(e, o, d) and o.write('\n')
| 418 | |||
def update_data(d):
    """Modifies the environment vars according to local overrides and commands.
    Examples:
        Appending to a variable:
        >>> d = init()
        >>> setVar('TEST', 'this is a', d)
        >>> setVar('TEST_append', ' test', d)
        >>> setVar('TEST_append', ' of the emergency broadcast system.', d)
        >>> update_data(d)
        >>> print getVar('TEST', d)
        this is a test of the emergency broadcast system.

        Prepending to a variable:
        >>> setVar('TEST', 'virtual/libc', d)
        >>> setVar('TEST_prepend', 'virtual/tmake ', d)
        >>> setVar('TEST_prepend', 'virtual/patcher ', d)
        >>> update_data(d)
        >>> print getVar('TEST', d)
        virtual/patcher virtual/tmake virtual/libc

        Overrides:
        >>> setVar('TEST_arm', 'target', d)
        >>> setVar('TEST_ramses', 'machine', d)
        >>> setVar('TEST_local', 'local', d)
        >>> setVar('OVERRIDES', 'arm', d)

        >>> setVar('TEST', 'original', d)
        >>> update_data(d)
        >>> print getVar('TEST', d)
        target

        >>> setVar('OVERRIDES', 'arm:ramses:local', d)
        >>> setVar('TEST', 'original', d)
        >>> update_data(d)
        >>> print getVar('TEST', d)
        local

        CopyMonster:
        >>> e = d.createCopy()
        >>> setVar('TEST_foo', 'foo', e)
        >>> update_data(e)
        >>> print getVar('TEST', e)
        local

        >>> setVar('OVERRIDES', 'arm:ramses:local:foo', e)
        >>> update_data(e)
        >>> print getVar('TEST', e)
        foo

        >>> f = d.createCopy()
        >>> setVar('TEST_moo', 'something', f)
        >>> setVar('OVERRIDES', 'moo:arm:ramses:local:foo', e)
        >>> update_data(e)
        >>> print getVar('TEST', e)
        foo


        >>> h = init()
        >>> setVar('SRC_URI', 'file://append.foo;patch=1 ', h)
        >>> g = h.createCopy()
        >>> setVar('SRC_URI_append_arm', 'file://other.foo;patch=1', g)
        >>> setVar('OVERRIDES', 'arm:moo', g)
        >>> update_data(g)
        >>> print getVar('SRC_URI', g)
        file://append.foo;patch=1 file://other.foo;patch=1

    """
    bb.msg.debug(2, bb.msg.domain.Data, "update_data()")

    # now ask the cookie monster for help
    #print "Cookie Monster"
    #print "Append/Prepend %s" % d._special_values
    #print "Overrides %s" % d._seen_overrides

    overrides = (getVar('OVERRIDES', d, 1) or "").split(':') or []

    #
    # Well let us see what breaks here. We used to iterate
    # over each variable and apply the override and then
    # do the line expanding.
    # If we have bad luck - which we will have - the keys
    # where in some order that is so important for this
    # method which we don't have anymore.
    # Anyway we will fix that and write test cases this
    # time.

    #
    # First we apply all overrides
    # Then we will handle _append and _prepend
    #

    # Pass 1: for every active override suffix, copy VAR_<override>
    # over VAR.  Later entries in OVERRIDES win because they are
    # applied last.
    for o in overrides:
        # calculate '_'+override
        l = len(o)+1

        # see if one should even try
        if not d._seen_overrides.has_key(o):
            continue

        vars = d._seen_overrides[o]
        for var in vars:
            # strip the '_<override>' suffix to get the base variable name
            name = var[:-l]
            try:
                d[name] = d[var]
            except:
                # the suffixed variable was deleted without updating the
                # _seen_overrides bookkeeping; nothing to apply
                bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar")

    # now on to the appends and prepends
    # Pass 2: apply queued VAR_append[_override] entries.  Entries whose
    # override is not active are kept pending (the flag is only deleted
    # when at least one entry applies).
    if d._special_values.has_key('_append'):
        appends = d._special_values['_append'] or []
        for append in appends:
            for (a, o) in getVarFlag(append, '_append', d) or []:
                # maybe the OVERRIDE was not yet added so keep the append
                if (o and o in overrides) or not o:
                    delVarFlag(append, '_append', d)
                if o and not o in overrides:
                    continue

                sval = getVar(append,d) or ""
                sval+=a
                setVar(append, sval, d)


    # Pass 3: same as pass 2, but for VAR_prepend[_override] entries.
    if d._special_values.has_key('_prepend'):
        prepends = d._special_values['_prepend'] or []

        for prepend in prepends:
            for (a, o) in getVarFlag(prepend, '_prepend', d) or []:
                # maybe the OVERRIDE was not yet added so keep the prepend
                if (o and o in overrides) or not o:
                    delVarFlag(prepend, '_prepend', d)
                if o and not o in overrides:
                    continue

                sval = a + (getVar(prepend,d) or "")
                setVar(prepend, sval, d)
| 555 | |||
| 556 | |||
def inherits_class(klass, d):
    """Return True when the metadata in d records an inherit of klass.bbclass."""
    cache = getVar('__inherit_cache', d) or []
    return os.path.join('classes', '%s.bbclass' % klass) in cache
| 562 | |||
def _test():
    """Start a doctest run on this module"""
    # runs the doctests embedded in the docstrings of bb.data (e.g. the
    # update_data examples above)
    import doctest
    from bb import data
    doctest.testmod(data)
| 568 | |||
# Run this module's doctests when invoked as a script.
if __name__ == "__main__":
    _test()
diff --git a/bitbake-dev/lib/bb/data_smart.py b/bitbake-dev/lib/bb/data_smart.py new file mode 100644 index 0000000000..b3a51b0edf --- /dev/null +++ b/bitbake-dev/lib/bb/data_smart.py | |||
| @@ -0,0 +1,292 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake Smart Dictionary Implementation | ||
| 5 | |||
| 6 | Functions for interacting with the data structure used by the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # Copyright (C) 2004, 2005 Seb Frankengul | ||
| 13 | # Copyright (C) 2005, 2006 Holger Hans Peter Freyther | ||
| 14 | # Copyright (C) 2005 Uli Luckas | ||
| 15 | # Copyright (C) 2005 ROAD GmbH | ||
| 16 | # | ||
| 17 | # This program is free software; you can redistribute it and/or modify | ||
| 18 | # it under the terms of the GNU General Public License version 2 as | ||
| 19 | # published by the Free Software Foundation. | ||
| 20 | # | ||
| 21 | # This program is distributed in the hope that it will be useful, | ||
| 22 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 23 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 24 | # GNU General Public License for more details. | ||
| 25 | # | ||
| 26 | # You should have received a copy of the GNU General Public License along | ||
| 27 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 28 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 29 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 30 | |||
| 31 | import copy, os, re, sys, time, types | ||
| 32 | import bb | ||
| 33 | from bb import utils, methodpool | ||
| 34 | from COW import COWDictBase | ||
| 35 | from sets import Set | ||
| 36 | from new import classobj | ||
| 37 | |||
| 38 | |||
| 39 | __setvar_keyword__ = ["_append","_prepend"] | ||
| 40 | __setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?') | ||
| 41 | __expand_var_regexp__ = re.compile(r"\${[^{}]+}") | ||
| 42 | __expand_python_regexp__ = re.compile(r"\${@.+?}") | ||
| 43 | |||
| 44 | |||
class DataSmart:
    """Variable store with per-variable flags, ${VAR} / ${@python} expansion
    and cheap copies (createCopy).  Lookups fall through the chain of parent
    dicts linked via the special "_data" key; writes shadow-copy the entry
    into the local dict first, so copies behave copy-on-write."""

    def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
        # var name -> {"content": value, <flag>: flagvalue, ...};
        # the reserved key "_data" links to a parent DataSmart's dict.
        self.dict = {}

        # cookie monster tribute
        # _special_values: "_append"/"_prepend" -> Set of base variable names
        # with queued entries.  _seen_overrides: override suffix -> Set of
        # full variable names (e.g. "FOO_arm") carrying that suffix.  Both
        # are COW dicts so copies share them cheaply.
        self._special_values = special
        self._seen_overrides = seen

        # memoises expand() results keyed by variable name; flushed on write
        self.expand_cache = {}

    def expand(self,s, varname):
        """Recursively expand ${VAR} and ${@python} references in s.
        varname is the name being expanded (used for self-reference
        detection and for the expand cache); may be None."""
        def var_sub(match):
            # replace a ${KEY} reference with its expanded value, or leave
            # the reference untouched when KEY is unset
            key = match.group()[2:-1]
            if varname and key:
                if varname == key:
                    raise Exception("variable %s references itself!" % varname)
            var = self.getVar(key, 1)
            if var is not None:
                return var
            else:
                return match.group()

        def python_sub(match):
            # evaluate a ${@<expression>} snippet with 'd' bound to this store
            import bb
            code = match.group()[3:-1]
            locals()['d'] = self
            s = eval(code)
            if type(s) == types.IntType: s = str(s)
            return s

        if type(s) is not types.StringType: # sanity check
            return s

        if varname and varname in self.expand_cache:
            return self.expand_cache[varname]

        # keep substituting until no references remain or a pass makes no
        # progress (unresolvable references are left in place)
        while s.find('${') != -1:
            olds = s
            try:
                s = __expand_var_regexp__.sub(var_sub, s)
                s = __expand_python_regexp__.sub(python_sub, s)
                if s == olds: break
                if type(s) is not types.StringType: # sanity check
                    bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
            except KeyboardInterrupt:
                raise
            except:
                bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
                raise

        if varname:
            self.expand_cache[varname] = s

        return s

    def initVar(self, var):
        # create an empty local entry for var (invalidates the expand cache)
        self.expand_cache = {}
        if not var in self.dict:
            self.dict[var] = {}

    def _findVar(self,var):
        """Walk the "_data" parent chain and return var's flag dict, or
        None when the variable is unknown anywhere in the chain."""
        _dest = self.dict

        while (_dest and var not in _dest):
            if not "_data" in _dest:
                _dest = None
                break
            _dest = _dest["_data"]

        if _dest and var in _dest:
            return _dest[var]
        return None

    def _makeShadowCopy(self, var):
        """Copy var's flag dict from a parent into the local dict so it can
        be modified without affecting the parent (the copy-on-write step)."""
        if var in self.dict:
            return

        local_var = self._findVar(var)

        if local_var:
            self.dict[var] = copy.copy(local_var)
        else:
            self.initVar(var)

    def setVar(self,var,value):
        self.expand_cache = {}
        match = __setvar_regexp__.match(var)
        if match and match.group("keyword") in __setvar_keyword__:
            # FOO_append[_override] / FOO_prepend[_override]: queue the
            # value on the base variable's _append/_prepend flag rather
            # than setting anything now (update_data applies them later)
            base = match.group('base')
            keyword = match.group("keyword")
            override = match.group('add')
            l = self.getVarFlag(base, keyword) or []
            l.append([value, override])
            self.setVarFlag(base, keyword, l)

            # todo make sure keyword is not __doc__ or __module__
            # pay the cookie monster
            try:
                self._special_values[keyword].add( base )
            except:
                self._special_values[keyword] = Set()
                self._special_values[keyword].add( base )

            return

        if not var in self.dict:
            self._makeShadowCopy(var)
        if self.getVarFlag(var, 'matchesenv'):
            # variable used to mirror the environment; now explicitly set,
            # so mark it exported instead
            self.delVarFlag(var, 'matchesenv')
            self.setVarFlag(var, 'export', 1)

        # more cookies for the cookie monster
        if '_' in var:
            # record the potential override suffix for update_data()
            override = var[var.rfind('_')+1:]
            if not self._seen_overrides.has_key(override):
                self._seen_overrides[override] = Set()
            self._seen_overrides[override].add( var )

        # setting var
        self.dict[var]["content"] = value

    def getVar(self,var,exp):
        """Return var's value; expand ${...} references when exp is true."""
        value = self.getVarFlag(var,"content")

        if exp and value:
            return self.expand(value,var)
        return value

    def renameVar(self, key, newkey):
        """
        Rename the variable key to newkey
        """
        val = self.getVar(key, 0)
        if val is None:
            return

        self.setVar(newkey, val)

        # carry any queued appends/prepends over to the new name and fix up
        # the _special_values bookkeeping to match
        for i in ('_append', '_prepend'):
            dest = self.getVarFlag(newkey, i) or []
            src = self.getVarFlag(key, i) or []
            dest.extend(src)
            self.setVarFlag(newkey, i, dest)

            if self._special_values.has_key(i) and key in self._special_values[i]:
                self._special_values[i].remove(key)
                self._special_values[i].add(newkey)

        self.delVar(key)

    def delVar(self,var):
        # an empty local entry shadows (masks) any value in a parent
        self.expand_cache = {}
        self.dict[var] = {}

    def setVarFlag(self,var,flag,flagvalue):
        if not var in self.dict:
            self._makeShadowCopy(var)
        self.dict[var][flag] = flagvalue

    def getVarFlag(self,var,flag):
        # returns a shallow copy so callers cannot mutate parent data in place
        local_var = self._findVar(var)
        if local_var:
            if flag in local_var:
                return copy.copy(local_var[flag])
        return None

    def delVarFlag(self,var,flag):
        local_var = self._findVar(var)
        if not local_var:
            return
        if not var in self.dict:
            self._makeShadowCopy(var)

        if var in self.dict and flag in self.dict[var]:
            del self.dict[var][flag]

    def setVarFlags(self,var,flags):
        # bulk-set flags; "content" is reserved for the value and skipped
        if not var in self.dict:
            self._makeShadowCopy(var)

        for i in flags.keys():
            if i == "content":
                continue
            self.dict[var][i] = flags[i]

    def getVarFlags(self,var):
        """Return a dict of all flags set on var (the value itself is
        excluded), or None when there are none."""
        local_var = self._findVar(var)
        flags = {}

        if local_var:
            for i in local_var.keys():
                if i == "content":
                    continue
                flags[i] = local_var[i]

        if len(flags) == 0:
            return None
        return flags


    def delVarFlags(self,var):
        """Drop every flag on var while preserving its value, if any."""
        if not var in self.dict:
            self._makeShadowCopy(var)

        if var in self.dict:
            content = None

            # try to save the content
            if "content" in self.dict[var]:
                content = self.dict[var]["content"]
                self.dict[var] = {}
                self.dict[var]["content"] = content
            else:
                del self.dict[var]


    def createCopy(self):
        """
        Create a copy of self by setting _data to self
        """
        # we really want this to be a DataSmart...
        data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
        data.dict["_data"] = self.dict

        return data

    # Dictionary Methods
    def keys(self):
        """Return all variable names visible anywhere in the parent chain."""
        def _keys(d, mykey):
            # parents first so local names simply overwrite inherited ones
            if "_data" in d:
                _keys(d["_data"],mykey)

            for key in d.keys():
                if key != "_data":
                    mykey[key] = None
        keytab = {}
        _keys(self.dict,keytab)
        return keytab.keys()

    def __getitem__(self,item):
        #print "Warning deprecated"
        return self.getVar(item, False)

    def __setitem__(self,var,data):
        #print "Warning deprecated"
        self.setVar(var,data)
| 291 | |||
| 292 | |||
diff --git a/bitbake-dev/lib/bb/event.py b/bitbake-dev/lib/bb/event.py new file mode 100644 index 0000000000..c13a0127a5 --- /dev/null +++ b/bitbake-dev/lib/bb/event.py | |||
| @@ -0,0 +1,302 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Event' implementation | ||
| 5 | |||
| 6 | Classes and functions for manipulating 'events' in the | ||
| 7 | BitBake build tools. | ||
| 8 | """ | ||
| 9 | |||
| 10 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | |||
| 25 | import os, re | ||
| 26 | import bb.utils | ||
| 27 | |||
class Event:
    """Root of the BitBake event hierarchy; carries a metadata store."""
    type = "Event"

    def __init__(self, d):
        self.setData(d)

    def getData(self):
        """Return the metadata store carried by this event."""
        return self._data

    def setData(self, data):
        """Attach a metadata store (may be None) to this event."""
        self._data = data

    data = property(getData, setData, None, "data property")
| 42 | |||
# Return codes for event handlers
NotHandled = 0
Handled = 1

# Return codes for register()
Registered = 10
AlreadyRegistered = 14

# Internal
_handlers = {}       # name -> handler callable or compiled code object
_ui_handlers = {}    # sequence number -> attached UI handler proxy
_ui_handler_seq = 0  # last sequence number handed out by register_UIHhandler
| 53 | |||
def fire(event):
    """Fire off an Event"""

    # Dispatch to every registered handler.  Handlers registered from a
    # string of python code were compiled into a code object that defines
    # tmpHandler(); exec'ing it here binds tmpHandler so it can be called.
    for handler in _handlers:
        h = _handlers[handler]
        if type(h).__name__ == "code":
            exec(h)
            tmpHandler(event)
        else:
            h(event)

    # Remove the event data elements for UI handlers - too much data otherwise
    # They can request data if they need it
    event.data = None
    event._data = None

    # Forward the (now data-stripped) event to each attached UI; any UI
    # whose send fails is dropped.  Collect failures first so we do not
    # mutate _ui_handlers while iterating it.
    errors = []
    for h in _ui_handlers:
        #print "Sending event %s" % event
        classid = "%s.%s" % (event.__class__.__module__, event.__class__.__name__)
        try:
            _ui_handlers[h].event.send((classid, event))
        except:
            errors.append(h)
    for h in errors:
        del _ui_handlers[h]
| 80 | |||
def register(name, handler):
    """Register an Event handler under *name*.

    Returns AlreadyRegistered when the name is taken, Registered otherwise.
    A string handler is treated as python source and compiled into the body
    of a tmpHandler(e) function.
    """
    if name in _handlers:
        return AlreadyRegistered

    if handler is None:
        return Registered

    if type(handler).__name__ == "str":
        # handle string containing python code
        src = "def tmpHandler(e):\n%s" % handler
        _handlers[name] = bb.utils.better_compile(src, "tmpHandler(e)", "bb.event._registerCode")
    else:
        _handlers[name] = handler

    return Registered
| 98 | |||
def remove(name, handler):
    """Remove an Event handler (the handler argument is unused)."""
    del _handlers[name]
| 102 | |||
def register_UIHhandler(handler):
    """Attach a UI handler and return the number used to unregister it."""
    bb.event._ui_handler_seq += 1
    _ui_handlers[_ui_handler_seq] = handler
    return _ui_handler_seq
| 107 | |||
def unregister_UIHhandler(handlerNum):
    """Detach a UI handler; unknown numbers are silently ignored."""
    _ui_handlers.pop(handlerNum, None)
    return
| 112 | |||
def getName(e):
    """Returns the name of a class or class instance"""
    name = getattr(e, "__name__", None)
    if name is None:
        # plain instance: fall back to the name of its class
        return e.__class__.__name__
    return name
| 119 | |||
class ConfigParsed(Event):
    """Fired once the base configuration has been completely parsed."""
| 122 | |||
class StampUpdate(Event):
    """Trigger for any adjustment of the stamp files to happen"""

    def __init__(self, targets, stampfns, d):
        Event.__init__(self, d)
        self._targets = targets
        self._stampfns = stampfns

    def getStampPrefix(self):
        """Stamp filename prefixes corresponding to the targets."""
        return self._stampfns

    def getTargets(self):
        """The targets whose stamps are being adjusted."""
        return self._targets

    stampPrefix = property(getStampPrefix)
    targets = property(getTargets)
| 139 | |||
class PkgBase(Event):
    """Base class for package events"""

    def __init__(self, t, d):
        self._pkg = t
        Event.__init__(self, d)
        # e.g. "package foo-1.0: Started" -- getName(self)[3:] strips the
        # "Pkg" prefix from the subclass name (PkgStarted -> "Started"),
        # so subclasses are expected to follow that naming scheme
        self._message = "package %s: %s" % (bb.data.getVar("P", d, 1), getName(self)[3:])

    def getPkg(self):
        return self._pkg

    def setPkg(self, pkg):
        self._pkg = pkg

    pkg = property(getPkg, setPkg, None, "pkg property")
| 155 | |||
| 156 | |||
class BuildBase(Event):
    """Base class for bbmake run events"""

    def __init__(self, n, p, c, failures = 0):
        Event.__init__(self, c)
        self._name = n
        self._pkgs = p
        self._failures = failures

    def getPkgs(self):
        """Packages involved in this build run."""
        return self._pkgs

    def setPkgs(self, pkgs):
        self._pkgs = pkgs

    def getName(self):
        """Name of this build run."""
        return self._name

    def setName(self, name):
        self._name = name

    def getCfg(self):
        """The configuration, stored as the event's data."""
        return self.data

    def setCfg(self, cfg):
        self.data = cfg

    def getFailures(self):
        """
        Return the number of failed packages
        """
        return self._failures

    pkgs = property(getPkgs, setPkgs, None, "pkgs property")
    name = property(getName, setName, None, "name property")
    cfg = property(getCfg, setCfg, None, "cfg property")
| 193 | |||
| 194 | |||
class DepBase(PkgBase):
    """Base class for dependency events"""

    def __init__(self, t, data, d):
        # NOTE: here 'd' is the dependency name and 'data' is the metadata
        # store -- the reverse of the usual convention in this file
        self._dep = d
        PkgBase.__init__(self, t, data)

    def getDep(self):
        return self._dep

    def setDep(self, dep):
        self._dep = dep

    dep = property(getDep, setDep, None, "dep property")
| 209 | |||
| 210 | |||
class PkgStarted(PkgBase):
    """Fired when an individual package build begins."""
| 213 | |||
| 214 | |||
class PkgFailed(PkgBase):
    """Fired when an individual package build fails."""
| 217 | |||
| 218 | |||
class PkgSucceeded(PkgBase):
    """Fired when an individual package build completes successfully."""
| 221 | |||
| 222 | |||
class BuildStarted(BuildBase):
    """Fired when a bbmake build run starts."""
| 225 | |||
| 226 | |||
class BuildCompleted(BuildBase):
    """Fired when a bbmake build run completes."""
| 229 | |||
| 230 | |||
class UnsatisfiedDep(DepBase):
    """Fired when a dependency cannot be satisfied."""
| 233 | |||
| 234 | |||
class RecursiveDep(DepBase):
    """Fired when a recursive (circular) dependency is detected."""
| 237 | |||
class NoProvider(Event):
    """No Provider for an Event"""

    def __init__(self, item, data, runtime=False):
        Event.__init__(self, data)
        self._item = item
        self._runtime = runtime

    def getItem(self):
        """The build target that has no provider."""
        return self._item

    def isRuntime(self):
        """True when the missing provider is a runtime dependency."""
        return self._runtime
| 251 | |||
class MultipleProviders(Event):
    """Multiple Providers"""

    def __init__(self, item, candidates, data, runtime = False):
        Event.__init__(self, data)
        self._item = item
        self._candidates = candidates
        self._is_runtime = runtime

    def getItem(self):
        """Name of the item to be built."""
        return self._item

    def getCandidates(self):
        """The candidate providers that could satisfy the item."""
        return self._candidates

    def isRuntime(self):
        """True when this ambiguity concerns a runtime dependency."""
        return self._is_runtime
| 278 | |||
class ParseProgress(Event):
    """
    Parsing Progress Event
    """

    def __init__(self, d, cached, parsed, skipped, masked, errors, total):
        Event.__init__(self, d)
        self.cached = cached    # recipes satisfied from the parse cache
        self.parsed = parsed    # recipes actually (re)parsed
        self.skipped = skipped  # recipes the parser skipped
        self.masked = masked    # recipes masked out
        self.errors = errors    # recipes that failed to parse
        # progress so far; masked and errored recipes are not counted
        self.sofar = cached + parsed + skipped
        self.total = total      # total number of recipes considered
| 293 | |||
class DepTreeGenerated(Event):
    """
    Event when a dependency tree has been generated
    """

    def __init__(self, d, depgraph):
        Event.__init__(self, d)
        # the generated dependency graph structure
        self._depgraph = depgraph
| 302 | |||
diff --git a/bitbake-dev/lib/bb/fetch/__init__.py b/bitbake-dev/lib/bb/fetch/__init__.py new file mode 100644 index 0000000000..c3bea447c1 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/__init__.py | |||
| @@ -0,0 +1,556 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | Classes for obtaining upstream sources for the | ||
| 7 | BitBake build tools. | ||
| 8 | """ | ||
| 9 | |||
| 10 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | # | ||
| 25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 26 | |||
| 27 | import os, re, fcntl | ||
| 28 | import bb | ||
| 29 | from bb import data | ||
| 30 | from bb import persist_data | ||
| 31 | |||
| 32 | try: | ||
| 33 | import cPickle as pickle | ||
| 34 | except ImportError: | ||
| 35 | import pickle | ||
| 36 | |||
class FetchError(Exception):
    """Exception raised when a download fails"""
    # raised by fetcher implementations; callers catch this base type
| 39 | |||
class NoMethodError(Exception):
    """Exception raised when there is no method to obtain a supplied url or set of urls"""
| 42 | |||
class MissingParameterError(Exception):
    """Exception raised when a fetch method is missing a critical parameter in the url"""
| 45 | |||
class ParameterError(Exception):
    """Exception raised when a url cannot be processed due to invalid parameters."""
| 48 | |||
class MD5SumError(Exception):
    """Exception raised when a MD5SUM of a file does not match the expected one"""
| 51 | |||
def uri_replace(uri, uri_find, uri_replace, d):
    """Rewrite *uri* according to a find/replace pair of URI templates.

    Each decoded string component of uri_find is treated as a regexp and
    matched against the corresponding component of uri; on a match, the
    component is rewritten using the matching component of uri_replace.
    The path component (index 2) is additionally rebased onto the local
    filename when one is known.  Returns the re-encoded URI, or the
    original uri unchanged when any component fails to match or an
    argument is undefined.
    """
    import types
    # bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri)
    if not uri or not uri_find or not uri_replace:
        bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing")
        # Bug fix: previously execution fell through and crashed trying to
        # decode an undefined value; honour the message and leave uri alone.
        return uri
    uri_decoded = list(bb.decodeurl(uri))
    uri_find_decoded = list(bb.decodeurl(uri_find))
    uri_replace_decoded = list(bb.decodeurl(uri_replace))
    result_decoded = ['','','','','',{}]
    # enumerate() instead of list.index(i): index() returns the first
    # occurrence, which mis-addressed components with equal values
    for loc, i in enumerate(uri_find_decoded):
        result_decoded[loc] = uri_decoded[loc]
        if type(i) == types.StringType:
            if (re.match(i, uri_decoded[loc])):
                result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
                if loc == 2 and d:
                    # path component: rebase onto the known local filename
                    localfn = bb.fetch.localpath(uri, d)
                    if localfn:
                        result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(localfn)
                # bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc]))
            else:
                # bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: no match")
                return uri
        # else:
        #     FIXME: apply replacements against the parameter dictionary
    return bb.encodeurl(result_decoded)
| 81 | |||
# Registered fetcher implementations (each fetch module appends its own)
methods = []
# Per-recipe cache of url -> FetchData maps, keyed by the recipe filename
urldata_cache = {}
| 84 | |||
def fetcher_init(d):
    """
    Called to initialize the fetchers once the configuration data is known.
    Calls before this must not hit the cache.
    """
    pd = persist_data.PersistData(d)
    # when to drop SCM head revisions is controlled by user policy
    policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
    if policy == "clear":
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Clearing SRCREV cache due to cache policy of: %s" % policy)
        pd.delDomain("BB_URI_HEADREVS")
    elif policy == "cache":
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Keeping SRCREV cache due to cache policy of: %s" % policy)
    else:
        bb.msg.fatal(bb.msg.domain.Fetcher, "Invalid SRCREV cache policy of: %s" % policy)
    # make sure our domains exist
    pd.addDomain("BB_URI_HEADREVS")
    pd.addDomain("BB_URI_LOCALCOUNT")
| 103 | |||
| 104 | # Function call order is usually: | ||
| 105 | # 1. init | ||
| 106 | # 2. go | ||
| 107 | # 3. localpaths | ||
| 108 | # localpath can be called at any time | ||
| 109 | |||
def init(urls, d, setup = True):
    """Return the url -> FetchData map for the current recipe, creating
    entries for any new urls and (optionally) setting up local paths."""
    fn = bb.data.getVar('FILE', d, 1)
    urldata = urldata_cache.get(fn, {})

    for url in urls:
        if url not in urldata:
            urldata[url] = FetchData(url, d)

    if setup:
        for ud in urldata.values():
            if not ud.setup:
                ud.setup_localpath(d)

    urldata_cache[fn] = urldata
    return urldata
| 127 | |||
def go(d):
    """
    Fetch all urls
    init must have previously been called
    """
    urldata = init([], d, True)

    for u in urldata:
        ud = urldata[u]
        m = ud.method
        if ud.localfile:
            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                # File already present along with md5 stamp file
                # Touch md5 file to show activity
                try:
                    os.utime(ud.md5, None)
                except:
                    # Errors aren't fatal here
                    pass
                continue
            # serialise concurrent fetches of the same file
            lf = bb.utils.lockfile(ud.lockfile)
            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                # If someone else fetched this before we got the lock,
                # notice and don't try again
                try:
                    os.utime(ud.md5, None)
                except:
                    # Errors aren't fatal here
                    pass
                bb.utils.unlockfile(lf)
                continue
        m.go(u, ud, d)
        if ud.localfile:
            if not m.forcefetch(u, ud, d):
                # record the md5 stamp so future runs can skip the fetch
                Fetch.write_md5sum(u, ud, d)
            # lf was acquired above in the same ud.localfile case
            bb.utils.unlockfile(lf)
| 164 | |||
| 165 | |||
def checkstatus(d):
    """
    Check all urls exist upstream
    init must have previously been called
    """
    urldata = init([], d, True)

    for u, ud in urldata.items():
        bb.msg.note(1, bb.msg.domain.Fetcher, "Testing URL %s" % u)
        if not ud.method.checkstatus(u, ud, d):
            bb.msg.fatal(bb.msg.domain.Fetcher, "URL %s doesn't work" % u)
| 180 | |||
def localpaths(d):
    """
    Return a list of the local filenames, assuming successful fetch.

    init() must have previously been called.
    """
    urldata = init([], d, True)
    return [urldata[u].localpath for u in urldata]
| 193 | |||
# Module-level re-entrancy guard: set while a fetcher's localpath() is
# evaluating SRCREV so that get_srcrev() can detect the recursive call
# and return the magic "SRCREVINACTION" marker instead of a revision.
srcrev_internal_call = False
| 195 | |||
def get_srcrev(d):
    """
    Return the version string for the current package
    (usually to be used as PV).

    Most packages usually only have one SCM so we just pass on the call.
    In the multi SCM case, we build a value based on SRCREV_FORMAT which must
    have been set.

    Raises ParameterError (the class itself, py2 idiom) when no
    SRCREV-capable SCM is present or SRCREV_FORMAT is missing.
    """

    #
    # Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which
    # could translate into a call to here. If it does, we need to catch this
    # and provide some way so it knows get_srcrev is active instead of being
    # some number etc. hence the srcrev_internal_call tracking and the magic
    # "SRCREVINACTION" return value.
    #
    # Neater solutions welcome!
    #
    if bb.fetch.srcrev_internal_call:
        return "SRCREVINACTION"

    scms = []

    # Only call setup_localpath on URIs which suppports_srcrev()
    # (the misspelt method name is the established fetcher interface).
    urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False)
    for u in urldata:
        ud = urldata[u]
        if ud.method.suppports_srcrev():
            if not ud.setup:
                ud.setup_localpath(d)
            scms.append(u)

    if len(scms) == 0:
        bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI")
        raise ParameterError

    # Single SCM: just return its sortable revision directly.
    if len(scms) == 1:
        return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d)

    #
    # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
    #
    format = bb.data.getVar('SRCREV_FORMAT', d, 1)
    if not format:
        bb.msg.error(bb.msg.domain.Fetcher, "The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
        raise ParameterError

    # Substitute each named SCM's revision into the format string.
    # NOTE(review): this only substitutes for urls carrying a name=
    # parameter, and the name must literally appear in SRCREV_FORMAT.
    for scm in scms:
        if 'name' in urldata[scm].parm:
            name = urldata[scm].parm["name"]
            rev = urldata[scm].method.sortable_revision(scm, urldata[scm], d)
            format = format.replace(name, rev)

    return format
| 250 | |||
def localpath(url, d, cache = True):
    """
    Return the local path for a single url, or the url itself when no
    fetch method claims it.

    Called from the parser with cache=False since the cache isn't ready
    at this point. Also called from classed in OE e.g. patch.bbclass

    NOTE(review): the 'cache' argument is not used by this body -- init()
    is called without it. It appears to be kept for call-site
    compatibility; confirm whether init() should honour it.
    """
    ud = init([url], d)
    if ud[url].method:
        return ud[url].localpath
    return url
| 260 | |||
| 261 | def runfetchcmd(cmd, d, quiet = False): | ||
| 262 | """ | ||
| 263 | Run cmd returning the command output | ||
| 264 | Raise an error if interrupted or cmd fails | ||
| 265 | Optionally echo command output to stdout | ||
| 266 | """ | ||
| 267 | |||
| 268 | # Need to export PATH as binary could be in metadata paths | ||
| 269 | # rather than host provided | ||
| 270 | # Also include some other variables. | ||
| 271 | # FIXME: Should really include all export varaiables? | ||
| 272 | exportvars = ['PATH', 'GIT_PROXY_HOST', 'GIT_PROXY_PORT', 'GIT_PROXY_COMMAND'] | ||
| 273 | |||
| 274 | for var in exportvars: | ||
| 275 | val = data.getVar(var, d, True) | ||
| 276 | if val: | ||
| 277 | cmd = 'export ' + var + '=%s; %s' % (val, cmd) | ||
| 278 | |||
| 279 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd) | ||
| 280 | |||
| 281 | # redirect stderr to stdout | ||
| 282 | stdout_handle = os.popen(cmd + " 2>&1", "r") | ||
| 283 | output = "" | ||
| 284 | |||
| 285 | while 1: | ||
| 286 | line = stdout_handle.readline() | ||
| 287 | if not line: | ||
| 288 | break | ||
| 289 | if not quiet: | ||
| 290 | print line, | ||
| 291 | output += line | ||
| 292 | |||
| 293 | status = stdout_handle.close() or 0 | ||
| 294 | signal = status >> 8 | ||
| 295 | exitstatus = status & 0xff | ||
| 296 | |||
| 297 | if signal: | ||
| 298 | raise FetchError("Fetch command %s failed with signal %s, output:\n%s" % (cmd, signal, output)) | ||
| 299 | elif status != 0: | ||
| 300 | raise FetchError("Fetch command %s failed with exit code %s, output:\n%s" % (cmd, status, output)) | ||
| 301 | |||
| 302 | return output | ||
| 303 | |||
class FetchData(object):
    """
    A class which represents the fetcher state for a given URI.
    """
    def __init__(self, url, d):
        # localfile is the DL_DIR-relative download name; fetchers that
        # produce a file fill it in via their localpath() method.
        self.localfile = ""
        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = bb.decodeurl(data.expand(url, d))
        self.date = Fetch.getSRCDate(self, d)
        self.url = url
        self.setup = False
        # Bind the first registered fetch method that claims this url.
        for m in methods:
            if m.supports(url, self, d):
                self.method = m
                return
        raise NoMethodError("Missing implementation for url %s" % url)

    def setup_localpath(self, d):
        """
        Resolve and cache the local path for this url, plus the derived
        md5 stamp and lock file names.
        """
        self.setup = True
        if "localpath" in self.parm:
            # if user sets localpath for file, use it instead.
            self.localpath = self.parm["localpath"]
        else:
            # Guard against recursive SRCREV evaluation while the fetcher
            # computes its local path (see get_srcrev()).
            # NOTE(review): the flag is reset unconditionally rather than
            # restored, and is not exception-safe -- confirm no nesting
            # or exceptions can occur inside method.localpath().
            bb.fetch.srcrev_internal_call = True
            self.localpath = self.method.localpath(self.url, self, d)
            bb.fetch.srcrev_internal_call = False
            # We have to clear data's internal caches since the cached value of SRCREV is now wrong.
            # Horrible...
            bb.data.delVar("ISHOULDNEVEREXIST", d)
        self.md5 = self.localpath + '.md5'
        self.lockfile = self.localpath + '.lock'
| 334 | |||
| 335 | |||
class Fetch(object):
    """
    Base class for 'fetch'ing data.

    Concrete fetchers (cvs, svn, git, ...) subclass this and are
    registered in the module-level 'methods' list. Subclasses override
    supports()/localpath()/go() and, for SCMs with revisions, the
    _revision_key/_latest_revision/_build_revision hooks.
    """

    def __init__(self, urls = None):
        # Previously this took a shared mutable default ([]) and then
        # silently discarded the argument (self.urls = []). Store what
        # the caller actually passed.
        self.urls = urls or []

    def supports(self, url, urldata, d):
        """
        Check to see if this fetch class supports a given url.
        Returns a true value if it does, 0 otherwise.
        """
        return 0

    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        Can also setup variables in urldata for use in go (saving code duplication
        and duplicate code execution)
        """
        return url

    def setUrls(self, urls):
        self.__urls = urls

    def getUrls(self):
        return self.__urls

    urls = property(getUrls, setUrls, None, "Urls property")

    def forcefetch(self, url, urldata, d):
        """
        Force a fetch, even if localpath exists?
        """
        return False

    def suppports_srcrev(self):
        """
        The fetcher supports auto source revisions (SRCREV).
        (The misspelt name is the established interface -- callers and
        subclasses all use 'suppports_srcrev', so it cannot be renamed.)
        """
        return False

    def go(self, url, urldata, d):
        """
        Fetch urls
        Assumes localpath was called first
        """
        raise NoMethodError("Missing implementation for url")

    def checkstatus(self, url, urldata, d):
        """
        Check the status of a URL
        Assumes localpath was called first
        """
        bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s could not be checked for status since no method exists." % url)
        return True

    def getSRCDate(urldata, d):
        """
        Return the SRC Date for the component.

        urldata: the FetchData for the url (srcdate= parameter wins)
        d: the data store, consulted for the per-package and global
           SRCDATE/CVSDATE/DATE fallback chain
        """
        if "srcdate" in urldata.parm:
            return urldata.parm['srcdate']

        pn = data.getVar("PN", d, 1)

        if pn:
            return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)

        return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
    getSRCDate = staticmethod(getSRCDate)

    def srcrev_internal_helper(ud, d):
        """
        Return:
            a) a source revision if specified
            b) True if auto srcrev is in action
            c) False otherwise
        """

        if 'rev' in ud.parm:
            return ud.parm['rev']

        if 'tag' in ud.parm:
            return ud.parm['tag']

        rev = None
        if 'name' in ud.parm:
            pn = data.getVar("PN", d, 1)
            rev = data.getVar("SRCREV_pn-" + pn + "_" + ud.parm['name'], d, 1)
        if not rev:
            rev = data.getVar("SRCREV", d, 1)
        if not rev:
            return False
        # Compare with ==, not 'is': identity comparison of strings only
        # ever worked by accident of CPython's interning.
        if rev == "SRCREVINACTION":
            return True
        return rev
    srcrev_internal_helper = staticmethod(srcrev_internal_helper)

    def try_mirror(d, tarfn):
        """
        Try to use a mirrored version of the sources. We do this
        to avoid massive loads on foreign cvs and svn servers.
        This method will be used by the different fetcher
        implementations.

        d Is a bb.data instance
        tarfn is the name of the tarball
        """
        tarpath = os.path.join(data.getVar("DL_DIR", d, 1), tarfn)
        if os.access(tarpath, os.R_OK):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % tarfn)
            return True

        pn = data.getVar('PN', d, True)
        # Default to an empty list so the loop below is a no-op when PN
        # is unset (previously this was None, which would raise a
        # TypeError on iteration).
        src_tarball_stash = []
        if pn:
            src_tarball_stash = (data.getVar('SRC_TARBALL_STASH_%s' % pn, d, True) or data.getVar('CVS_TARBALL_STASH_%s' % pn, d, True) or data.getVar('SRC_TARBALL_STASH', d, True) or data.getVar('CVS_TARBALL_STASH', d, True) or "").split()

        for stash in src_tarball_stash:
            fetchcmd = data.getVar("FETCHCOMMAND_mirror", d, True) or data.getVar("FETCHCOMMAND_wget", d, True)
            uri = stash + tarfn
            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            ret = os.system(fetchcmd)
            if ret == 0:
                bb.msg.note(1, bb.msg.domain.Fetcher, "Fetched %s from tarball stash, skipping checkout" % tarfn)
                return True
        return False
    try_mirror = staticmethod(try_mirror)

    def verify_md5sum(ud, got_sum):
        """
        Verify the md5sum we wanted with the one we got.
        Returns True when no md5sum= parameter was supplied (nothing to
        check against), otherwise the result of the comparison.
        """
        wanted_sum = None
        if 'md5sum' in ud.parm:
            wanted_sum = ud.parm['md5sum']
        if not wanted_sum:
            return True

        return wanted_sum == got_sum
    verify_md5sum = staticmethod(verify_md5sum)

    def write_md5sum(url, ud, d):
        """
        Compute the md5 of the fetched file, verify it against any
        md5sum= url parameter (raising MD5SumError on mismatch), and
        write it to the .md5 stamp file.
        """
        md5data = bb.utils.md5_file(ud.localpath)
        # verify the md5sum
        if not Fetch.verify_md5sum(ud, md5data):
            raise MD5SumError(url)

        # open() rather than the deprecated file() builtin.
        md5out = open(ud.md5, 'w')
        md5out.write(md5data)
        md5out.close()
    write_md5sum = staticmethod(write_md5sum)

    def latest_revision(self, url, ud, d):
        """
        Look in the cache for the latest revision, if not present ask the SCM.
        """
        if not hasattr(self, "_latest_revision"):
            raise ParameterError

        pd = persist_data.PersistData(d)
        key = self._revision_key(url, ud, d)
        rev = pd.getValue("BB_URI_HEADREVS", key)
        if rev is not None:
            return str(rev)

        rev = self._latest_revision(url, ud, d)
        pd.setValue("BB_URI_HEADREVS", key, rev)
        return rev

    def sortable_revision(self, url, ud, d):
        """
        Return a sortable revision of the form "<count>+<rev>", using a
        persistent per-URI counter so that successive upstream revisions
        sort in the order they were fetched. Fetchers with naturally
        sortable revisions provide _sortable_revision instead.
        """
        if hasattr(self, "_sortable_revision"):
            return self._sortable_revision(url, ud, d)

        pd = persist_data.PersistData(d)
        key = self._revision_key(url, ud, d)
        latest_rev = self._build_revision(url, ud, d)
        last_rev = pd.getValue("BB_URI_LOCALCOUNT", key + "_rev")
        count = pd.getValue("BB_URI_LOCALCOUNT", key + "_count")

        # Same revision as last time: reuse the stored count.
        if last_rev == latest_rev:
            return str(count + "+" + latest_rev)

        if count is None:
            count = "0"
        else:
            count = str(int(count) + 1)

        pd.setValue("BB_URI_LOCALCOUNT", key + "_rev", latest_rev)
        pd.setValue("BB_URI_LOCALCOUNT", key + "_count", count)

        return str(count + "+" + latest_rev)
| 534 | |||
| 535 | |||
| 536 | import cvs | ||
| 537 | import git | ||
| 538 | import local | ||
| 539 | import svn | ||
| 540 | import wget | ||
| 541 | import svk | ||
| 542 | import ssh | ||
| 543 | import perforce | ||
| 544 | import bzr | ||
| 545 | import hg | ||
| 546 | |||
| 547 | methods.append(local.Local()) | ||
| 548 | methods.append(wget.Wget()) | ||
| 549 | methods.append(svn.Svn()) | ||
| 550 | methods.append(git.Git()) | ||
| 551 | methods.append(cvs.Cvs()) | ||
| 552 | methods.append(svk.Svk()) | ||
| 553 | methods.append(ssh.SSH()) | ||
| 554 | methods.append(perforce.Perforce()) | ||
| 555 | methods.append(bzr.Bzr()) | ||
| 556 | methods.append(hg.Hg()) | ||
diff --git a/bitbake-dev/lib/bb/fetch/bzr.py b/bitbake-dev/lib/bb/fetch/bzr.py new file mode 100644 index 0000000000..b23e9eef86 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/bzr.py | |||
| @@ -0,0 +1,154 @@ | |||
| 1 | """ | ||
| 2 | BitBake 'Fetch' implementation for bzr. | ||
| 3 | |||
| 4 | """ | ||
| 5 | |||
| 6 | # Copyright (C) 2007 Ross Burton | ||
| 7 | # Copyright (C) 2007 Richard Purdie | ||
| 8 | # | ||
| 9 | # Classes for obtaining upstream sources for the | ||
| 10 | # BitBake build tools. | ||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | |||
| 26 | import os | ||
| 27 | import sys | ||
| 28 | import bb | ||
| 29 | from bb import data | ||
| 30 | from bb.fetch import Fetch | ||
| 31 | from bb.fetch import FetchError | ||
| 32 | from bb.fetch import MissingParameterError | ||
| 33 | from bb.fetch import runfetchcmd | ||
| 34 | |||
| 35 | class Bzr(Fetch): | ||
| 36 | def supports(self, url, ud, d): | ||
| 37 | return ud.type in ['bzr'] | ||
| 38 | |||
| 39 | def localpath (self, url, ud, d): | ||
| 40 | |||
| 41 | # Create paths to bzr checkouts | ||
| 42 | relpath = ud.path | ||
| 43 | if relpath.startswith('/'): | ||
| 44 | # Remove leading slash as os.path.join can't cope | ||
| 45 | relpath = relpath[1:] | ||
| 46 | ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath) | ||
| 47 | |||
| 48 | revision = Fetch.srcrev_internal_helper(ud, d) | ||
| 49 | if revision is True: | ||
| 50 | ud.revision = self.latest_revision(url, ud, d) | ||
| 51 | elif revision: | ||
| 52 | ud.revision = revision | ||
| 53 | |||
| 54 | if not ud.revision: | ||
| 55 | ud.revision = self.latest_revision(url, ud, d) | ||
| 56 | |||
| 57 | ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d) | ||
| 58 | |||
| 59 | return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) | ||
| 60 | |||
| 61 | def _buildbzrcommand(self, ud, d, command): | ||
| 62 | """ | ||
| 63 | Build up an bzr commandline based on ud | ||
| 64 | command is "fetch", "update", "revno" | ||
| 65 | """ | ||
| 66 | |||
| 67 | basecmd = data.expand('${FETCHCMD_bzr}', d) | ||
| 68 | |||
| 69 | proto = "http" | ||
| 70 | if "proto" in ud.parm: | ||
| 71 | proto = ud.parm["proto"] | ||
| 72 | |||
| 73 | bzrroot = ud.host + ud.path | ||
| 74 | |||
| 75 | options = [] | ||
| 76 | |||
| 77 | if command is "revno": | ||
| 78 | bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) | ||
| 79 | else: | ||
| 80 | if ud.revision: | ||
| 81 | options.append("-r %s" % ud.revision) | ||
| 82 | |||
| 83 | if command is "fetch": | ||
| 84 | bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) | ||
| 85 | elif command is "update": | ||
| 86 | bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options)) | ||
| 87 | else: | ||
| 88 | raise FetchError("Invalid bzr command %s" % command) | ||
| 89 | |||
| 90 | return bzrcmd | ||
| 91 | |||
| 92 | def go(self, loc, ud, d): | ||
| 93 | """Fetch url""" | ||
| 94 | |||
| 95 | # try to use the tarball stash | ||
| 96 | if Fetch.try_mirror(d, ud.localfile): | ||
| 97 | bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping bzr checkout." % ud.localpath) | ||
| 98 | return | ||
| 99 | |||
| 100 | if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK): | ||
| 101 | bzrcmd = self._buildbzrcommand(ud, d, "update") | ||
| 102 | bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Update %s" % loc) | ||
| 103 | os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path))) | ||
| 104 | runfetchcmd(bzrcmd, d) | ||
| 105 | else: | ||
| 106 | os.system("rm -rf %s" % os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir))) | ||
| 107 | bzrcmd = self._buildbzrcommand(ud, d, "fetch") | ||
| 108 | bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Checkout %s" % loc) | ||
| 109 | bb.mkdirhier(ud.pkgdir) | ||
| 110 | os.chdir(ud.pkgdir) | ||
| 111 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % bzrcmd) | ||
| 112 | runfetchcmd(bzrcmd, d) | ||
| 113 | |||
| 114 | os.chdir(ud.pkgdir) | ||
| 115 | # tar them up to a defined filename | ||
| 116 | try: | ||
| 117 | runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.pkgdir)), d) | ||
| 118 | except: | ||
| 119 | t, v, tb = sys.exc_info() | ||
| 120 | try: | ||
| 121 | os.unlink(ud.localpath) | ||
| 122 | except OSError: | ||
| 123 | pass | ||
| 124 | raise t, v, tb | ||
| 125 | |||
| 126 | def suppports_srcrev(self): | ||
| 127 | return True | ||
| 128 | |||
| 129 | def _revision_key(self, url, ud, d): | ||
| 130 | """ | ||
| 131 | Return a unique key for the url | ||
| 132 | """ | ||
| 133 | return "bzr:" + ud.pkgdir | ||
| 134 | |||
| 135 | def _latest_revision(self, url, ud, d): | ||
| 136 | """ | ||
| 137 | Return the latest upstream revision number | ||
| 138 | """ | ||
| 139 | bb.msg.debug(2, bb.msg.domain.Fetcher, "BZR fetcher hitting network for %s" % url) | ||
| 140 | |||
| 141 | output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True) | ||
| 142 | |||
| 143 | return output.strip() | ||
| 144 | |||
| 145 | def _sortable_revision(self, url, ud, d): | ||
| 146 | """ | ||
| 147 | Return a sortable revision number which in our case is the revision number | ||
| 148 | """ | ||
| 149 | |||
| 150 | return self._build_revision(url, ud, d) | ||
| 151 | |||
| 152 | def _build_revision(self, url, ud, d): | ||
| 153 | return ud.revision | ||
| 154 | |||
diff --git a/bitbake-dev/lib/bb/fetch/cvs.py b/bitbake-dev/lib/bb/fetch/cvs.py new file mode 100644 index 0000000000..c4ccf4303f --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/cvs.py | |||
| @@ -0,0 +1,178 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | Classes for obtaining upstream sources for the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | # | ||
| 28 | |||
| 29 | import os, re | ||
| 30 | import bb | ||
| 31 | from bb import data | ||
| 32 | from bb.fetch import Fetch | ||
| 33 | from bb.fetch import FetchError | ||
| 34 | from bb.fetch import MissingParameterError | ||
| 35 | |||
class Cvs(Fetch):
    """
    Class to fetch a module or modules from cvs repositories
    """
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['cvs', 'pserver']

    def localpath(self, url, ud, d):
        """
        Decode the url parameters (module, tag, date, ...) into ud and
        return the path of the tarball this fetcher produces in DL_DIR.
        Raises MissingParameterError if no module= parameter was given.
        """
        if not "module" in ud.parm:
            raise MissingParameterError("cvs method needs a 'module' parameter")
        ud.module = ud.parm["module"]

        ud.tag = ""
        if 'tag' in ud.parm:
            ud.tag = ud.parm['tag']

        # Override the default date in certain cases
        if 'date' in ud.parm:
            ud.date = ud.parm['date']
        elif ud.tag:
            # A tag pins the checkout, so drop the date qualifier.
            ud.date = ""

        norecurse = ''
        if 'norecurse' in ud.parm:
            norecurse = '_norecurse'

        fullpath = ''
        if 'fullpath' in ud.parm:
            fullpath = '_fullpath'

        # Encode everything that affects the checkout into the tarball
        # name so different variants don't collide in DL_DIR.
        ud.localfile = data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        # A floating date of "now" can never be cached; always refetch.
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        """
        Check the module out of cvs (or update an existing checkout) and
        pack the result into the tarball named by localpath().
        Raises FetchError if the cvs command or the tar step fails.
        """

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
            return

        method = "pserver"
        if "method" in ud.parm:
            method = ud.parm["method"]

        localdir = ud.module
        if "localdir" in ud.parm:
            localdir = ud.parm["localdir"]

        cvs_port = ""
        if "port" in ud.parm:
            cvs_port = ud.parm["port"]

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        # Assemble the CVSROOT, optionally routing through a proxy.
        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method
            cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
            if cvsproxyhost:
                cvsroot += ";proxy=" + cvsproxyhost
            cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
            if cvsproxyport:
                cvsroot += ";proxyport=" + cvsproxyport
            cvsroot += ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

        options = []
        if 'norecurse' in ud.parm:
            options.append("-l")
        if ud.date:
            options.append("-D \"%s UTC\"" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        # Expand FETCHCOMMAND/UPDATECOMMAND against a cvs-override copy
        # of the datastore so ${CVSROOT} etc. resolve correctly.
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', ud.module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir,localdir)
        if os.access(os.path.join(moddir,'CVS'), os.R_OK):
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(moddir)
            myret = os.system(cvsupdatecmd)
        else:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
            myret = os.system(cvscmd)

        if myret != 0 or not os.access(moddir, os.R_OK):
            # Remove the (empty) module dir so a later attempt starts clean.
            try:
                os.rmdir(moddir)
            except OSError:
                pass
            raise FetchError(ud.module)

        # tar them up to a defined filename
        if 'fullpath' in ud.parm:
            os.chdir(pkgdir)
            myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
        else:
            os.chdir(moddir)
            os.chdir('..')
            myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))

        if myret != 0:
            # Don't leave a truncated tarball behind.
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
diff --git a/bitbake-dev/lib/bb/fetch/git.py b/bitbake-dev/lib/bb/fetch/git.py new file mode 100644 index 0000000000..f4ae724f87 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/git.py | |||
| @@ -0,0 +1,142 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' git implementation | ||
| 5 | |||
| 6 | """ | ||
| 7 | |||
| 8 | #Copyright (C) 2005 Richard Purdie | ||
| 9 | # | ||
| 10 | # This program is free software; you can redistribute it and/or modify | ||
| 11 | # it under the terms of the GNU General Public License version 2 as | ||
| 12 | # published by the Free Software Foundation. | ||
| 13 | # | ||
| 14 | # This program is distributed in the hope that it will be useful, | ||
| 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | # GNU General Public License for more details. | ||
| 18 | # | ||
| 19 | # You should have received a copy of the GNU General Public License along | ||
| 20 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 22 | |||
| 23 | import os, re | ||
| 24 | import bb | ||
| 25 | from bb import data | ||
| 26 | from bb.fetch import Fetch | ||
| 27 | from bb.fetch import FetchError | ||
| 28 | from bb.fetch import runfetchcmd | ||
| 29 | |||
def prunedir(topdir):
    """
    Delete everything reachable from the directory named in 'topdir'
    (topdir itself is left in place, emptied).
    CAUTION: This is dangerous!
    """
    # Walk bottom-up so each directory is already empty when removed.
    for base, subdirs, entries in os.walk(topdir, topdown=False):
        for entry in entries:
            os.remove(os.path.join(base, entry))
        for subdir in subdirs:
            os.rmdir(os.path.join(base, subdir))
| 38 | |||
| 39 | class Git(Fetch): | ||
| 40 | """Class to fetch a module or modules from git repositories""" | ||
| 41 | def supports(self, url, ud, d): | ||
| 42 | """ | ||
| 43 | Check to see if a given url can be fetched with git. | ||
| 44 | """ | ||
| 45 | return ud.type in ['git'] | ||
| 46 | |||
    def localpath(self, url, ud, d):
        """
        Resolve the tag/revision to fetch and return the path of the
        tarball this fetcher will produce under DL_DIR.
        """

        # Default transport; overridable with the protocol= url parameter.
        ud.proto = "rsync"
        if 'protocol' in ud.parm:
            ud.proto = ud.parm['protocol']

        ud.branch = ud.parm.get("branch", "master")

        tag = Fetch.srcrev_internal_helper(ud, d)
        # 'is True' distinguishes the auto-srcrev marker from a literal
        # (truthy) revision string returned by the helper.
        if tag is True:
            ud.tag = self.latest_revision(url, ud, d)
        elif tag:
            ud.tag = tag

        # NOTE(review): if the helper returned False neither branch above
        # set ud.tag -- confirm FetchData pre-initialises it.
        if not ud.tag:
            ud.tag = self.latest_revision(url, ud, d)

        # A symbolic "master" is not a stable identifier; pin it to the
        # current remote HEAD revision instead.
        if ud.tag == "master":
            ud.tag = self.latest_revision(url, ud, d)

        ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.tag), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
| 70 | |||
    def go(self, loc, ud, d):
        """
        Fetch url: maintain a local git repository under ${GITDIR}, then
        export the requested tag into a tarball named by localpath().
        """

        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping git checkout." % ud.localpath)
            return

        gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))

        repofilename = 'git_%s.tar.gz' % (gitsrcname)
        repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
        repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

        coname = '%s' % (ud.tag)
        codir = os.path.join(repodir, coname)

        # Seed the local repository, either from a mirrored tarball of
        # the repo or with a fresh clone (-n: no checkout yet).
        if not os.path.exists(repodir):
            if Fetch.try_mirror(d, repofilename):
                bb.mkdirhier(repodir)
                os.chdir(repodir)
                runfetchcmd("tar -xzf %s" % (repofile), d)
            else:
                runfetchcmd("git clone -n %s://%s%s %s" % (ud.proto, ud.host, ud.path, repodir), d)

        os.chdir(repodir)
        # Remove all but the .git directory
        runfetchcmd("rm * -Rf", d)
        runfetchcmd("git fetch %s://%s%s %s" % (ud.proto, ud.host, ud.path, ud.branch), d)
        runfetchcmd("git fetch --tags %s://%s%s" % (ud.proto, ud.host, ud.path), d)
        # Compact the object store before (optionally) tarring it up.
        runfetchcmd("git prune-packed", d)
        runfetchcmd("git pack-redundant --all | xargs -r rm", d)

        os.chdir(repodir)
        mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
        if mirror_tarballs != "0":
            bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
            runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)

        # Export the requested tag into a scratch checkout directory.
        if os.path.exists(codir):
            prunedir(codir)

        bb.mkdirhier(codir)
        os.chdir(repodir)
        runfetchcmd("git read-tree %s" % (ud.tag), d)
        runfetchcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")), d)

        # Tar up the checkout as the download result, then clean up.
        os.chdir(codir)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout")
        runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*") ), d)

        os.chdir(repodir)
        prunedir(codir)
| 123 | |||
    def suppports_srcrev(self):
        """Git URLs support SRCREV-based revision selection."""
        # NOTE(review): the method name is misspelled ("suppports") but it must
        # match the name the Fetch base class looks up -- confirm before
        # renaming it here and at its call sites.
        return True
| 126 | |||
| 127 | def _revision_key(self, url, ud, d): | ||
| 128 | """ | ||
| 129 | Return a unique key for the url | ||
| 130 | """ | ||
| 131 | return "git:" + ud.host + ud.path.replace('/', '.') | ||
| 132 | |||
| 133 | def _latest_revision(self, url, ud, d): | ||
| 134 | """ | ||
| 135 | Compute the HEAD revision for the url | ||
| 136 | """ | ||
| 137 | output = runfetchcmd("git ls-remote %s://%s%s %s" % (ud.proto, ud.host, ud.path, ud.branch), d, True) | ||
| 138 | return output.split()[0] | ||
| 139 | |||
    def _build_revision(self, url, ud, d):
        """Return the revision the build should use: the tag resolved in localpath()."""
        return ud.tag
| 142 | |||
diff --git a/bitbake-dev/lib/bb/fetch/hg.py b/bitbake-dev/lib/bb/fetch/hg.py new file mode 100644 index 0000000000..ee3bd2f7fe --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/hg.py | |||
| @@ -0,0 +1,141 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementation for mercurial DRCS (hg). | ||
| 5 | |||
| 6 | """ | ||
| 7 | |||
| 8 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 9 | # Copyright (C) 2004 Marcin Juszkiewicz | ||
| 10 | # Copyright (C) 2007 Robert Schuster | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | # | ||
| 25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 26 | |||
| 27 | import os, re | ||
| 28 | import sys | ||
| 29 | import bb | ||
| 30 | from bb import data | ||
| 31 | from bb.fetch import Fetch | ||
| 32 | from bb.fetch import FetchError | ||
| 33 | from bb.fetch import MissingParameterError | ||
| 34 | from bb.fetch import runfetchcmd | ||
| 35 | |||
class Hg(Fetch):
    """Fetcher for mercurial (hg) repositories."""

    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with mercurial.
        """
        return ud.type in ['hg']

    def localpath(self, url, ud, d):
        """
        Work out checkout directories and the local tarball name for the url
        and return the tarball's full path under DL_DIR.

        Raises MissingParameterError when the url has no 'module' parameter.
        """
        if "module" not in ud.parm:
            raise MissingParameterError("hg method needs a 'module' parameter")

        ud.module = ud.parm["module"]

        # Create paths to mercurial checkouts
        relpath = ud.path
        if relpath.startswith('/'):
            # Remove leading slash as os.path.join can't cope
            relpath = relpath[1:]
        ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)

        # NOTE(review): when no 'rev' parameter is given, ud.revision is
        # assumed to have been initialised elsewhere (e.g. by the Fetch core)
        # before its use below -- confirm, otherwise this raises AttributeError.
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def _buildhgcommand(self, ud, d, command):
        """
        Build up an hg commandline based on ud
        command is "fetch", "pull", "update", "info"

        Raises FetchError for an unknown command.
        """

        basecmd = data.expand('${FETCHCMD_hg}', d)

        proto = ud.parm.get("proto", "http")

        host = ud.host
        if proto == "file":
            # Local repository: the path alone identifies it.
            host = "/"
            ud.host = "localhost"

        hgroot = host + ud.path

        # Bugfix vs. original: compare command strings with ==, not "is" --
        # string identity is an interpreter implementation accident.
        if command == "info":
            return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)

        options = []
        if ud.revision:
            options.append("-r %s" % ud.revision)

        if command == "fetch":
            cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
        elif command == "pull":
            cmd = "%s pull %s" % (basecmd, " ".join(options))
        elif command == "update":
            cmd = "%s update -C %s" % (basecmd, " ".join(options))
        else:
            raise FetchError("Invalid hg command %s" % command)

        return cmd

    def go(self, loc, ud, d):
        """Fetch url"""

        # try to use the tarball stash
        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping hg checkout." % ud.localpath)
            return

        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
            # Existing checkout: pull new changesets, then update the working copy.
            updatecmd = self._buildhgcommand(ud, d, "pull")
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(ud.moddir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
            runfetchcmd(updatecmd, d)

            updatecmd = self._buildhgcommand(ud, d, "update")
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
            runfetchcmd(updatecmd, d)
        else:
            # Fresh clone into the package directory.
            fetchcmd = self._buildhgcommand(ud, d, "fetch")
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd)
            runfetchcmd(fetchcmd, d)

        os.chdir(ud.pkgdir)
        try:
            runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
        except:
            # Don't leave a half-written tarball behind, then re-raise with
            # the original traceback (equivalent to the old "raise t, v, tb"
            # but valid syntax in both Python 2 and 3).
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise
diff --git a/bitbake-dev/lib/bb/fetch/local.py b/bitbake-dev/lib/bb/fetch/local.py new file mode 100644 index 0000000000..54d598ae89 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/local.py | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | Classes for obtaining upstream sources for the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | |||
| 28 | import os, re | ||
| 29 | import bb | ||
| 30 | from bb import data | ||
| 31 | from bb.fetch import Fetch | ||
| 32 | |||
class Local(Fetch):
    """Fetcher for file:// and patch URLs that already exist on the local system."""

    def supports(self, url, urldata, d):
        """
        Check to see if a given url represents a local file or patch.
        (Docstring fixed vs. original, which wrongly said "cvs".)
        """
        return urldata.type in ['file', 'patch']

    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
        path = url.split("://")[1]
        path = path.split(";")[0]
        newpath = path
        if path[0] != "/":
            # Relative path: resolve against FILESPATH, falling back to FILESDIR.
            filespath = data.getVar('FILESPATH', d, 1)
            if filespath:
                newpath = bb.which(filespath, path)
            if not newpath:
                filesdir = data.getVar('FILESDIR', d, 1)
                if filesdir:
                    newpath = os.path.join(filesdir, path)
        # We don't set localfile as for this fetcher the file is already local!
        return newpath

    def go(self, url, urldata, d):
        """Fetch urls (no-op for Local method)"""
        # no need to fetch local files, we'll deal with them in place.
        return 1

    def checkstatus(self, url, urldata, d):
        """
        Check the status of the url: True when the local file exists.
        Glob patterns are assumed present and reported, not expanded.
        """
        if "*" in urldata.localpath:
            bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." % url)
            return True
        return os.path.exists(urldata.localpath)
diff --git a/bitbake-dev/lib/bb/fetch/perforce.py b/bitbake-dev/lib/bb/fetch/perforce.py new file mode 100644 index 0000000000..b594d2bde2 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/perforce.py | |||
| @@ -0,0 +1,213 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | Classes for obtaining upstream sources for the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | |||
| 28 | import os, re | ||
| 29 | import bb | ||
| 30 | from bb import data | ||
| 31 | from bb.fetch import Fetch | ||
| 32 | from bb.fetch import FetchError | ||
| 33 | from bb.fetch import MissingParameterError | ||
| 34 | |||
class Perforce(Fetch):
    """Fetcher for Perforce (p4) depots."""

    def supports(self, url, ud, d):
        """Check to see if a given url can be fetched with perforce."""
        return ud.type in ['p4']

    def doparse(url, d):
        """
        Split a p4 url into (host, path, user, pswd, parm).

        Credentials come from a user:pswd@host:port prefix when present,
        otherwise host/port come from the P4PORT variable.  ';key=value'
        suffixes are collected into the parm dict, and parm['cset'] is
        resolved eagerly via getcset().
        """
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)

    def getcset(d, depot, host, user, pswd, parm):
        """
        Return the newest changeset number for *depot* (as a string), or -1
        when 'p4 changes' reports nothing.  An explicit 'cset' parameter
        short-circuits the server query.
        """
        if "cset" in parm:
            return parm["cset"]
        if user:
            data.setVar('P4USER', user, d)
        if pswd:
            data.setVar('P4PASSWD', pswd, d)
        if host:
            data.setVar('P4PORT', host, d)

        p4date = data.getVar("P4DATE", d, 1)
        # Pin the query to a revision, label or date when one was supplied.
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)

        p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s changes -m 1 %s" % (p4cmd, depot))
        p4file = os.popen("%s changes -m 1 %s" % (p4cmd, depot))
        cset = p4file.readline().strip()
        bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset))
        if not cset:
            return -1

        # 'p4 changes' output looks like "Change <number> on <date> ...".
        return cset.split(' ')[1]
    getcset = staticmethod(getcset)

    def localpath(self, url, ud, d):
        """
        Work out the local tarball name for the depot and return its full
        path under DL_DIR.  A 'label' parameter names the tarball directly.
        """
        (host, path, user, pswd, parm) = Perforce.doparse(url, d)

        # If a label is specified, we use that as our filename

        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

        # Strip any trailing '/...' wildcard and leading slash from the path.
        base = path
        which = path.find('/...')
        if which != -1:
            base = path[:which]

        if base[0] == "/":
            base = base[1:]

        cset = Perforce.getcset(d, path, host, user, pswd, parm)

        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)

        return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

    def go(self, loc, ud, d):
        """
        Fetch urls
        """

        # try to use the tarball stash
        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
            return

        (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)

        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot

        if "module" in parm:
            module = parm["module"]
        else:
            module = os.path.basename(path)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # Get the p4 command
        if user:
            data.setVar('P4USER', user, localdata)

        if pswd:
            data.setVar('P4PASSWD', pswd, localdata)

        if host:
            data.setVar('P4PORT', host, localdata)

        p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

        # create temp directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(module)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
        p4file = os.popen("%s files %s" % (p4cmd, depot))

        # NOTE(review): os.popen() always returns a file object, so this test
        # never fires -- confirm intent before relying on it for errors.
        if not p4file:
            bb.error("Fetch: unable to get the P4 files from %s" % (depot))
            raise FetchError(module)

        count = 0

        # Loop variables renamed vs. original, which shadowed the builtins
        # 'file' and 'list'.
        for p4line in p4file:
            fields = p4line.split()

            if fields[2] == "delete":
                continue

            dest = fields[0][len(path) + 1:]
            where = dest.find("#")

            os.system("%s print -o %s/%s %s" % (p4cmd, module, dest[:where], fields[0]))
            count = count + 1

        if count == 0:
            bb.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError(module)

        myret = os.system("tar -czf %s %s" % (ud.localpath, module))
        if myret != 0:
            # Remove any partial tarball before signalling failure.
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
| 212 | |||
| 213 | |||
diff --git a/bitbake-dev/lib/bb/fetch/ssh.py b/bitbake-dev/lib/bb/fetch/ssh.py new file mode 100644 index 0000000000..81a9892dcc --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/ssh.py | |||
| @@ -0,0 +1,120 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | ''' | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | This implementation is for Secure Shell (SSH), and attempts to comply with the | ||
| 7 | IETF secsh internet draft: | ||
| 8 | http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/ | ||
| 9 | |||
| 10 | Currently does not support the sftp parameters, as this uses scp | ||
| 11 | Also does not support the 'fingerprint' connection parameter. | ||
| 12 | |||
| 13 | ''' | ||
| 14 | |||
| 15 | # Copyright (C) 2006 OpenedHand Ltd. | ||
| 16 | # | ||
| 17 | # | ||
| 18 | # Based in part on svk.py: | ||
| 19 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
| 20 | # Based on svn.py: | ||
| 21 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 22 | # Based on functions from the base bb module: | ||
| 23 | # Copyright 2003 Holger Schurig | ||
| 24 | # | ||
| 25 | # | ||
| 26 | # This program is free software; you can redistribute it and/or modify | ||
| 27 | # it under the terms of the GNU General Public License version 2 as | ||
| 28 | # published by the Free Software Foundation. | ||
| 29 | # | ||
| 30 | # This program is distributed in the hope that it will be useful, | ||
| 31 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 32 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 33 | # GNU General Public License for more details. | ||
| 34 | # | ||
| 35 | # You should have received a copy of the GNU General Public License along | ||
| 36 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 37 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 38 | |||
| 39 | import re, os | ||
| 40 | import bb | ||
| 41 | from bb import data | ||
| 42 | from bb.fetch import Fetch | ||
| 43 | from bb.fetch import FetchError | ||
| 44 | from bb.fetch import MissingParameterError | ||
| 45 | |||
| 46 | |||
# Compiled form of the secsh-draft ssh:// URI syntax; the named groups are
# consumed by SSH.localpath() and SSH.go() below.
__pattern__ = re.compile(r'''
 \s*                 # Skip leading whitespace
 ssh://              # scheme
 (                   # Optional username/password block
  (?P<user>\S+)      # username
  (:(?P<pass>\S+))?  # colon followed by the password (optional)
 )?
 (?P<cparam>(;[^;]+)*)?  # connection parameters block (optional)
 @
 (?P<host>\S+?)          # non-greedy match of the host
 (:(?P<port>[0-9]+))?    # colon followed by the port (optional)
 /
 (?P<path>[^;]+)         # path on the remote system, may be absolute or relative,
                         # and may include the use of '~' to reference the remote home
                         # directory
 (?P<sparam>(;[^;]+)*)?  # parameters block (optional)
 $
''', re.VERBOSE)
| 65 | |||
class SSH(Fetch):
    '''Class to fetch a module or modules via Secure Shell'''

    def supports(self, url, urldata, d):
        """Return True when *url* matches the module-level ssh:// pattern."""
        # Idiom fix vs. original: identity comparison with None uses 'is not'.
        return __pattern__.match(url) is not None

    def localpath(self, url, urldata, d):
        """Return DL_DIR/<host>/<basename of the remote path> for the url."""
        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path))
        return lpath

    def go(self, url, urldata, d):
        """Fetch the remote file into DL_DIR/<host>/ via scp.

        Raises FetchError when scp exits non-zero.
        """
        dldir = data.getVar('DL_DIR', d, 1)

        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        port = m.group('port')
        user = m.group('user')
        password = m.group('pass')

        ldir = os.path.join(dldir, host)
        lpath = os.path.join(ldir, os.path.basename(path))

        if not os.path.exists(ldir):
            os.makedirs(ldir)

        # scp uses -P (capital) for the port, unlike ssh's -p.
        if port:
            port = '-P %s' % port
        else:
            port = ''

        # Assemble the user[:password]@host:path source argument.
        if user:
            fr = user
            if password:
                fr += ':%s' % password
            fr += '@%s' % host
        else:
            fr = host
        fr += ':%s' % path


        import commands
        cmd = 'scp -B -r %s %s %s/' % (
            port,
            commands.mkarg(fr),
            commands.mkarg(ldir)
        )

        (exitstatus, output) = commands.getstatusoutput(cmd)
        if exitstatus != 0:
            # print(x) behaves identically in Python 2 for a single argument
            # and keeps the file parseable by Python 3 tooling.
            print(output)
            raise FetchError('Unable to fetch %s' % url)
diff --git a/bitbake-dev/lib/bb/fetch/svk.py b/bitbake-dev/lib/bb/fetch/svk.py new file mode 100644 index 0000000000..d863ccb6e0 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/svk.py | |||
| @@ -0,0 +1,109 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | This implementation is for svk. It is based on the svn implementation | ||
| 7 | |||
| 8 | """ | ||
| 9 | |||
| 10 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | |||
| 28 | import os, re | ||
| 29 | import bb | ||
| 30 | from bb import data | ||
| 31 | from bb.fetch import Fetch | ||
| 32 | from bb.fetch import FetchError | ||
| 33 | from bb.fetch import MissingParameterError | ||
| 34 | |||
class Svk(Fetch):
    """Class to fetch a module or modules from svk repositories"""

    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with svk.
        (Docstring fixed vs. original, which wrongly said "cvs".)
        """
        return ud.type in ['svk']

    def localpath(self, url, ud, d):
        """
        Work out the local tarball name for the module and return its full
        path under DL_DIR.

        Raises MissingParameterError when the url has no 'module' parameter.
        """
        if "module" not in ud.parm:
            raise MissingParameterError("svk method needs a 'module' parameter")
        else:
            ud.module = ud.parm["module"]

        ud.revision = ""
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        """A date of "now" always forces a fresh fetch."""
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        """Fetch urls"""

        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            return

        svkroot = ud.host + ud.path

        # Bugfix vs. original: 'date' was an undefined name (NameError);
        # the checkout date lives in ud.date.
        svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)

        if ud.revision:
            # Bugfix vs. original: the format string had two placeholders for
            # three arguments (TypeError) and fused the revision into the path;
            # '-r %s %s/%s' passes the revision and the checkout path separately.
            svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

        # create temp directory
        localdata = data.createCopy(d)
        data.update_data(localdata)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(ud.module)

        # check out sources there
        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
        myret = os.system(svkcmd)
        if myret != 0:
            try:
                os.rmdir(tmpfile)
            except OSError:
                pass
            raise FetchError(ud.module)

        os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
        # tar them up to a defined filename
        myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
        if myret != 0:
            # Remove any partial tarball before signalling failure.
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
diff --git a/bitbake-dev/lib/bb/fetch/svn.py b/bitbake-dev/lib/bb/fetch/svn.py new file mode 100644 index 0000000000..5e5b31b3ad --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/svn.py | |||
| @@ -0,0 +1,204 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementation for svn. | ||
| 5 | |||
| 6 | """ | ||
| 7 | |||
| 8 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 9 | # Copyright (C) 2004 Marcin Juszkiewicz | ||
| 10 | # | ||
| 11 | # This program is free software; you can redistribute it and/or modify | ||
| 12 | # it under the terms of the GNU General Public License version 2 as | ||
| 13 | # published by the Free Software Foundation. | ||
| 14 | # | ||
| 15 | # This program is distributed in the hope that it will be useful, | ||
| 16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | # GNU General Public License for more details. | ||
| 19 | # | ||
| 20 | # You should have received a copy of the GNU General Public License along | ||
| 21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 23 | # | ||
| 24 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 25 | |||
| 26 | import os, re | ||
| 27 | import sys | ||
| 28 | import bb | ||
| 29 | from bb import data | ||
| 30 | from bb.fetch import Fetch | ||
| 31 | from bb.fetch import FetchError | ||
| 32 | from bb.fetch import MissingParameterError | ||
| 33 | from bb.fetch import runfetchcmd | ||
| 34 | |||
| 35 | class Svn(Fetch): | ||
| 36 | """Class to fetch a module or modules from svn repositories""" | ||
| 37 | def supports(self, url, ud, d): | ||
| 38 | """ | ||
| 39 | Check to see if a given url can be fetched with svn. | ||
| 40 | """ | ||
| 41 | return ud.type in ['svn'] | ||
| 42 | |||
| 43 | def localpath(self, url, ud, d): | ||
| 44 | if not "module" in ud.parm: | ||
| 45 | raise MissingParameterError("svn method needs a 'module' parameter") | ||
| 46 | |||
| 47 | ud.module = ud.parm["module"] | ||
| 48 | |||
| 49 | # Create paths to svn checkouts | ||
| 50 | relpath = ud.path | ||
| 51 | if relpath.startswith('/'): | ||
| 52 | # Remove leading slash as os.path.join can't cope | ||
| 53 | relpath = relpath[1:] | ||
| 54 | ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath) | ||
| 55 | ud.moddir = os.path.join(ud.pkgdir, ud.module) | ||
| 56 | |||
| 57 | if 'rev' in ud.parm: | ||
| 58 | ud.date = "" | ||
| 59 | ud.revision = ud.parm['rev'] | ||
| 60 | elif 'date' in ud.date: | ||
| 61 | ud.date = ud.parm['date'] | ||
| 62 | ud.revision = "" | ||
| 63 | else: | ||
| 64 | # | ||
| 65 | # ***Nasty hack*** | ||
| 66 | # If DATE in unexpanded PV, use ud.date (which is set from SRCDATE) | ||
| 67 | # Should warn people to switch to SRCREV here | ||
| 68 | # | ||
| 69 | pv = data.getVar("PV", d, 0) | ||
| 70 | if "DATE" in pv: | ||
| 71 | ud.revision = "" | ||
| 72 | else: | ||
| 73 | rev = Fetch.srcrev_internal_helper(ud, d) | ||
| 74 | if rev is True: | ||
| 75 | ud.revision = self.latest_revision(url, ud, d) | ||
| 76 | ud.date = "" | ||
| 77 | elif rev: | ||
| 78 | ud.revision = rev | ||
| 79 | ud.date = "" | ||
| 80 | else: | ||
| 81 | ud.revision = "" | ||
| 82 | |||
| 83 | ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d) | ||
| 84 | |||
| 85 | return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) | ||
| 86 | |||
| 87 | def _buildsvncommand(self, ud, d, command): | ||
| 88 | """ | ||
| 89 | Build up an svn commandline based on ud | ||
| 90 | command is "fetch", "update", "info" | ||
| 91 | """ | ||
| 92 | |||
| 93 | basecmd = data.expand('${FETCHCMD_svn}', d) | ||
| 94 | |||
| 95 | proto = "svn" | ||
| 96 | if "proto" in ud.parm: | ||
| 97 | proto = ud.parm["proto"] | ||
| 98 | |||
| 99 | svn_rsh = None | ||
| 100 | if proto == "svn+ssh" and "rsh" in ud.parm: | ||
| 101 | svn_rsh = ud.parm["rsh"] | ||
| 102 | |||
| 103 | svnroot = ud.host + ud.path | ||
| 104 | |||
| 105 | # either use the revision, or SRCDATE in braces, | ||
| 106 | options = [] | ||
| 107 | |||
| 108 | if ud.user: | ||
| 109 | options.append("--username %s" % ud.user) | ||
| 110 | |||
| 111 | if ud.pswd: | ||
| 112 | options.append("--password %s" % ud.pswd) | ||
| 113 | |||
| 114 | if command is "info": | ||
| 115 | svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module) | ||
| 116 | else: | ||
| 117 | if ud.revision: | ||
| 118 | options.append("-r %s" % ud.revision) | ||
| 119 | elif ud.date: | ||
| 120 | options.append("-r {%s}" % ud.date) | ||
| 121 | |||
| 122 | if command is "fetch": | ||
| 123 | svncmd = "%s co %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, ud.module) | ||
| 124 | elif command is "update": | ||
| 125 | svncmd = "%s update %s" % (basecmd, " ".join(options)) | ||
| 126 | else: | ||
| 127 | raise FetchError("Invalid svn command %s" % command) | ||
| 128 | |||
| 129 | if svn_rsh: | ||
| 130 | svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd) | ||
| 131 | |||
| 132 | return svncmd | ||
| 133 | |||
| 134 | def go(self, loc, ud, d): | ||
| 135 | """Fetch url""" | ||
| 136 | |||
| 137 | # try to use the tarball stash | ||
| 138 | if Fetch.try_mirror(d, ud.localfile): | ||
| 139 | bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping svn checkout." % ud.localpath) | ||
| 140 | return | ||
| 141 | |||
| 142 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") | ||
| 143 | |||
| 144 | if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): | ||
| 145 | svnupdatecmd = self._buildsvncommand(ud, d, "update") | ||
| 146 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) | ||
| 147 | # update sources there | ||
| 148 | os.chdir(ud.moddir) | ||
| 149 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupdatecmd) | ||
| 150 | runfetchcmd(svnupdatecmd, d) | ||
| 151 | else: | ||
| 152 | svnfetchcmd = self._buildsvncommand(ud, d, "fetch") | ||
| 153 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) | ||
| 154 | # check out sources there | ||
| 155 | bb.mkdirhier(ud.pkgdir) | ||
| 156 | os.chdir(ud.pkgdir) | ||
| 157 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnfetchcmd) | ||
| 158 | runfetchcmd(svnfetchcmd, d) | ||
| 159 | |||
| 160 | os.chdir(ud.pkgdir) | ||
| 161 | # tar them up to a defined filename | ||
| 162 | try: | ||
| 163 | runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d) | ||
| 164 | except: | ||
| 165 | t, v, tb = sys.exc_info() | ||
| 166 | try: | ||
| 167 | os.unlink(ud.localpath) | ||
| 168 | except OSError: | ||
| 169 | pass | ||
| 170 | raise t, v, tb | ||
| 171 | |||
| 172 | def suppports_srcrev(self): | ||
| 173 | return True | ||
| 174 | |||
| 175 | def _revision_key(self, url, ud, d): | ||
| 176 | """ | ||
| 177 | Return a unique key for the url | ||
| 178 | """ | ||
| 179 | return "svn:" + ud.moddir | ||
| 180 | |||
| 181 | def _latest_revision(self, url, ud, d): | ||
| 182 | """ | ||
| 183 | Return the latest upstream revision number | ||
| 184 | """ | ||
| 185 | bb.msg.debug(2, bb.msg.domain.Fetcher, "SVN fetcher hitting network for %s" % url) | ||
| 186 | |||
| 187 | output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True) | ||
| 188 | |||
| 189 | revision = None | ||
| 190 | for line in output.splitlines(): | ||
| 191 | if "Last Changed Rev" in line: | ||
| 192 | revision = line.split(":")[1].strip() | ||
| 193 | |||
| 194 | return revision | ||
| 195 | |||
| 196 | def _sortable_revision(self, url, ud, d): | ||
| 197 | """ | ||
| 198 | Return a sortable revision number which in our case is the revision number | ||
| 199 | """ | ||
| 200 | |||
| 201 | return self._build_revision(url, ud, d) | ||
| 202 | |||
| 203 | def _build_revision(self, url, ud, d): | ||
| 204 | return ud.revision | ||
diff --git a/bitbake-dev/lib/bb/fetch/wget.py b/bitbake-dev/lib/bb/fetch/wget.py new file mode 100644 index 0000000000..739d5a1bc6 --- /dev/null +++ b/bitbake-dev/lib/bb/fetch/wget.py | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'Fetch' implementations | ||
| 5 | |||
| 6 | Classes for obtaining upstream sources for the | ||
| 7 | BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | # | ||
| 26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 27 | |||
| 28 | import os, re | ||
| 29 | import bb | ||
| 30 | from bb import data | ||
| 31 | from bb.fetch import Fetch | ||
| 32 | from bb.fetch import FetchError | ||
| 33 | from bb.fetch import uri_replace | ||
| 34 | |||
class Wget(Fetch):
    """Class to fetch urls via 'wget'"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with wget.
        """
        return ud.type in ['http','https','ftp']

    def localpath(self, url, ud, d):
        # Re-encode the url without its parameters and derive the local
        # download file name from the path's basename.
        url = bb.encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
        ud.basename = os.path.basename(ud.path)
        ud.localfile = data.expand(os.path.basename(url), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, uri, ud, d, checkonly = False):
        """Fetch urls"""

        def fetch_uri(uri, ud, d):
            # Pick the command: existence check only, resume of a partial
            # download, or a fresh fetch.
            if checkonly:
                fetchcmd = data.getVar("CHECKCOMMAND", d, 1)
            elif os.path.exists(ud.localpath):
                # file exists, but we didnt complete it.. trying again..
                fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
            else:
                fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            # The command templates contain literal ${URI}/${FILE}
            # placeholders which are substituted here, not by expansion.
            fetchcmd = fetchcmd.replace("${URI}", uri)
            fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
            bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
            ret = os.system(fetchcmd)
            if ret != 0:
                return False

            # Sanity check since wget can pretend it succeed when it didn't
            # Also, this used to happen if sourceforge sent us to the mirror page
            if not os.path.exists(ud.localpath):
                bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
                return False

            return True

        # Apply wget-specific OVERRIDES before reading mirror settings.
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # PREMIRRORS are tried before the original uri; MIRRORS after it.
        premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in premirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return True

        if fetch_uri(uri, ud, localdata):
            return True

        # try mirrors
        mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in mirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return True

        raise FetchError(uri)


    def checkstatus(self, uri, ud, d):
        # Existence check only: reuse go() in checkonly mode.
        return self.go(uri, ud, d, True)
diff --git a/bitbake-dev/lib/bb/manifest.py b/bitbake-dev/lib/bb/manifest.py new file mode 100644 index 0000000000..4e4b7d98ec --- /dev/null +++ b/bitbake-dev/lib/bb/manifest.py | |||
| @@ -0,0 +1,144 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 5 | # | ||
| 6 | # This program is free software; you can redistribute it and/or modify | ||
| 7 | # it under the terms of the GNU General Public License version 2 as | ||
| 8 | # published by the Free Software Foundation. | ||
| 9 | # | ||
| 10 | # This program is distributed in the hope that it will be useful, | ||
| 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | # GNU General Public License for more details. | ||
| 14 | # | ||
| 15 | # You should have received a copy of the GNU General Public License along | ||
| 16 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | |||
| 19 | import os, sys | ||
| 20 | import bb, bb.data | ||
| 21 | |||
def getfields(line):
    """
    Parse one whitespace-separated manifest line into a dict keyed by
    field name.  A '-' entry, or a missing trailing entry, leaves that
    field as None.  Returns None for an empty or all-whitespace line.
    """
    fieldmap = ( "pkg", "src", "dest", "type", "mode", "uid", "gid", "major", "minor", "start", "inc", "count" )
    # Every field starts out unset.
    fields = dict.fromkeys(fieldmap)

    if not line:
        return None

    splitline = line.split()
    if not len(splitline):
        return None

    # zip() stops at the shorter sequence, replacing the original
    # xrange/IndexError dance (and dropping Python-2-only xrange).
    for name, value in zip(fieldmap, splitline):
        if value != '-':
            fields[name] = value
    return fields
| 43 | |||
def parse (mfile, d):
    """Read a manifest file object, returning the list of field dicts."""
    entries = []
    # readline() returns '' at EOF, which terminates the sentinel loop.
    for rawline in iter(mfile.readline, ''):
        if rawline.startswith("#"):
            continue
        fields = getfields(rawline)
        if fields:
            entries.append(fields)
    return entries
| 57 | |||
def emit (func, manifest, d):
    """Emit the shell commands for all manifest entries, one per line."""
    # (wrapping the output in a "%s () { ... }" shell function was
    # considered here but is disabled)
    pieces = []
    for entry in manifest:
        cmd = emit_line(func, entry, d)
        if cmd:
            pieces.append(cmd + "\n")
    return "".join(pieces)
| 68 | |||
def mangle (func, line, d):
    """
    Return a copy of manifest entry 'line' with its src/dest rewritten
    for the given task function, or None when the entry does not apply
    (no dest, unmatched staging path, or unknown task).

    Bugfix: task names are now compared with '==' instead of 'is';
    string identity comparison only works by accident of interning.
    """
    import copy
    newline = copy.copy(line)
    src = bb.data.expand(newline["src"], d)

    if src:
        if not os.path.isabs(src):
            # Relative sources are taken from the work directory.
            src = "${WORKDIR}/" + src

    dest = newline["dest"]
    if not dest:
        return None

    if dest.startswith("/"):
        dest = dest[1:]

    if func == "do_install":
        dest = "${D}/" + dest

    elif func == "do_populate":
        dest = "${WORKDIR}/install/" + newline["pkg"] + "/" + dest

    elif func == "do_stage":
        # Map install locations onto their staging-area equivalents.
        varmap = {}
        varmap["${bindir}"] = "${STAGING_DIR}/${HOST_SYS}/bin"
        varmap["${libdir}"] = "${STAGING_DIR}/${HOST_SYS}/lib"
        varmap["${includedir}"] = "${STAGING_DIR}/${HOST_SYS}/include"
        varmap["${datadir}"] = "${STAGING_DATADIR}"

        matched = 0
        for key in varmap.keys():
            if dest.startswith(key):
                dest = varmap[key] + "/" + dest[len(key):]
                matched = 1
        if not matched:
            # Destination outside the known staging locations: skip it.
            return None
    else:
        # Unknown task function: nothing to emit.
        return None

    newline["src"] = src
    newline["dest"] = dest
    return newline
| 113 | |||
def emit_line (func, line, d):
    """
    Return the 'install ...' shell command for a single manifest entry,
    or None when nothing should be emitted for this task function.

    Bugfix: entry types are compared with '==' rather than 'is' — the
    values come from parsed file content, so identity comparison against
    a literal is unreliable.  Also stops shadowing the builtins
    'str' and 'type'.
    """
    import copy
    newline = mangle(func, copy.deepcopy(line), d)
    if not newline:
        return None

    cmd = ""
    entrytype = newline["type"]
    mode = newline["mode"]
    src = newline["src"]
    dest = newline["dest"]
    if entrytype == "d":
        # Directory entry: just create it.
        cmd = "install -d "
        if mode:
            cmd += "-m %s " % mode
        cmd += dest
    elif entrytype == "f":
        # File entry: needs a source.
        if not src:
            return None
        if dest.endswith("/"):
            # Destination is a directory: create it, then install into it.
            cmd = "install -d "
            cmd += dest + "\n"
            cmd += "install "
        else:
            cmd = "install -D "
        if mode:
            cmd += "-m %s " % mode
        cmd += src + " " + dest
    return cmd
diff --git a/bitbake-dev/lib/bb/methodpool.py b/bitbake-dev/lib/bb/methodpool.py new file mode 100644 index 0000000000..f43c4a0580 --- /dev/null +++ b/bitbake-dev/lib/bb/methodpool.py | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # | ||
| 5 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
| 6 | # | ||
| 7 | # This program is free software; you can redistribute it and/or modify | ||
| 8 | # it under the terms of the GNU General Public License version 2 as | ||
| 9 | # published by the Free Software Foundation. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, | ||
| 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | # GNU General Public License for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License along | ||
| 17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | |||
| 20 | |||
| 21 | """ | ||
| 22 | What is a method pool? | ||
| 23 | |||
| 24 | BitBake has a global method scope where .bb, .inc and .bbclass | ||
| 25 | files can install methods. These methods are parsed from strings. | ||
| 26 | To avoid recompiling and executing these string we introduce | ||
| 27 | a method pool to do this task. | ||
| 28 | |||
| 29 | This pool will be used to compile and execute the functions. It | ||
| 30 | will be smart enough to | ||
| 31 | """ | ||
| 32 | |||
| 33 | from bb.utils import better_compile, better_exec | ||
| 34 | from bb import error | ||
| 35 | |||
| 36 | # A dict of modules we have handled | ||
| 37 | # it is the number of .bbclasses + x in size | ||
| 38 | _parsed_methods = { } | ||
| 39 | _parsed_fns = { } | ||
| 40 | |||
def insert_method(modulename, code, fn):
    """
    Add code of a module should be added. The methods
    will be simply added, no checking will be done
    """
    # Compile and execute the code so the functions it defines become
    # available in the global (builtin) scope.
    comp = better_compile(code, "<bb>", fn )
    better_exec(comp, __builtins__, code, fn)

    # now some instrumentation
    # Record which module defined each name so duplicate definitions
    # across modules can be reported.
    code = comp.co_names
    for name in code:
        if name in ['None', 'False']:
            continue
        elif name in _parsed_fns and not _parsed_fns[name] == modulename:
            error( "Error Method already seen: %s in' %s' now in '%s'" % (name, _parsed_fns[name], modulename))
        else:
            _parsed_fns[name] = modulename
| 58 | |||
def check_insert_method(modulename, code, fn):
    """
    Add the code if it wasnt added before. The module
    name will be used for that

    Variables:
        @modulename a short name e.g. base.bbclass
        @code The actual python code
        @fn The filename from the outer file
    """
    if not modulename in _parsed_methods:
        # Bugfix: record the module *before* returning.  Previously the
        # bookkeeping assignment sat after the return, so new modules
        # were never recorded and every call recompiled the code.
        _parsed_methods[modulename] = 1
        return insert_method(modulename, code, fn)
| 72 | |||
def parsed_module(modulename):
    """Return whether the named module has already been parsed."""
    seen = modulename in _parsed_methods
    return seen
| 78 | |||
| 79 | |||
def get_parsed_dict():
    """Shortcut: expose the module-level dict of parsed modules."""
    return _parsed_methods
diff --git a/bitbake-dev/lib/bb/msg.py b/bitbake-dev/lib/bb/msg.py new file mode 100644 index 0000000000..7aa0a27d25 --- /dev/null +++ b/bitbake-dev/lib/bb/msg.py | |||
| @@ -0,0 +1,125 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake 'msg' implementation | ||
| 5 | |||
| 6 | Message handling infrastructure for bitbake | ||
| 7 | |||
| 8 | """ | ||
| 9 | |||
| 10 | # Copyright (C) 2006 Richard Purdie | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | |||
| 25 | import sys, os, re, bb | ||
| 26 | from bb import utils, event | ||
| 27 | |||
# Per-domain debug level, keyed by domain (plus a 'default' entry);
# populated by set_debug_level() and raised by set_debug_domains().
debug_level = {}

# Global verbosity flag consulted by note(); set via set_verbose().
verbose = False

# Enumeration of the logging domains used throughout bitbake.
domain = bb.utils.Enum(
    'Build',
    'Cache',
    'Collection',
    'Data',
    'Depends',
    'Fetcher',
    'Parsing',
    'PersistData',
    'Provider',
    'RunQueue',
    'TaskData',
    'Util')
| 45 | |||
| 46 | |||
class MsgBase(bb.event.Event):
    """Base class for messages"""

    def __init__(self, msg, d ):
        # Keep the message text; delivery happens via the event system.
        self._message = msg
        event.Event.__init__(self, d)
| 53 | |||
# Concrete message event classes, one per severity; handlers
# distinguish messages by their class.
class MsgDebug(MsgBase):
    """Debug Message"""

class MsgNote(MsgBase):
    """Note Message"""

class MsgWarn(MsgBase):
    """Warning Message"""

class MsgError(MsgBase):
    """Error Message"""

class MsgFatal(MsgBase):
    """Fatal Message"""

class MsgPlain(MsgBase):
    """General output"""
| 71 | |||
| 72 | # | ||
| 73 | # Message control functions | ||
| 74 | # | ||
| 75 | |||
def set_debug_level(level):
    """Reset every logging domain, plus the default, to the given level."""
    levels = {}
    for dom in bb.msg.domain:
        levels[dom] = level
    levels['default'] = level
    bb.msg.debug_level = levels
| 81 | |||
def set_verbose(level):
    # Module-level switch read by note(); truthy shows all notes.
    bb.msg.verbose = level
| 84 | |||
def set_debug_domains(domains):
    """Raise the debug level of each named domain; warn on unknown names."""
    for name in domains:
        matched = False
        for dom in bb.msg.domain:
            if str(dom) == name:
                bb.msg.debug_level[dom] += 1
                matched = True
        if not matched:
            bb.msg.warn(None, "Logging domain %s is not valid, ignoring" % name)
| 94 | |||
| 95 | # | ||
| 96 | # Message handling functions | ||
| 97 | # | ||
| 98 | |||
def debug(level, domain, msg, fn = None):
    """Fire a MsgDebug event when the domain's debug level reaches 'level'."""
    key = domain or 'default'
    if debug_level[key] >= level:
        bb.event.fire(MsgDebug(msg, None))
| 104 | |||
def note(level, domain, msg, fn = None):
    """Fire a MsgNote; level-1 notes always fire, others need verbose or domain debug."""
    key = domain or 'default'
    if level == 1 or verbose or debug_level[key] >= 1:
        bb.event.fire(MsgNote(msg, None))
| 110 | |||
def warn(domain, msg, fn = None):
    # Warnings are always emitted, regardless of debug/verbose settings.
    bb.event.fire(MsgWarn(msg, None))
| 113 | |||
def error(domain, msg, fn = None):
    # Fire the event and also print directly so the error is visible
    # even without an event handler attached.
    bb.event.fire(MsgError(msg, None))
    print 'ERROR: ' + msg
| 117 | |||
def fatal(domain, msg, fn = None):
    # Like error(), but terminates the process with exit status 1.
    bb.event.fire(MsgFatal(msg, None))
    print 'FATAL: ' + msg
    sys.exit(1)
| 122 | |||
def plain(msg, fn = None):
    # Plain output: no severity prefix, no level filtering.
    bb.event.fire(MsgPlain(msg, None))
| 125 | |||
diff --git a/bitbake-dev/lib/bb/parse/__init__.py b/bitbake-dev/lib/bb/parse/__init__.py new file mode 100644 index 0000000000..3c9ba8e6da --- /dev/null +++ b/bitbake-dev/lib/bb/parse/__init__.py | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | """ | ||
| 2 | BitBake Parsers | ||
| 3 | |||
| 4 | File parsers for the BitBake build tools. | ||
| 5 | |||
| 6 | """ | ||
| 7 | |||
| 8 | |||
| 9 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 10 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | # | ||
| 25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 26 | |||
| 27 | __all__ = [ 'ParseError', 'SkipPackage', 'cached_mtime', 'mark_dependency', | ||
| 28 | 'supports', 'handle', 'init' ] | ||
| 29 | handlers = [] | ||
| 30 | |||
| 31 | import bb, os | ||
| 32 | |||
class ParseError(Exception):
    """Exception raised when parsing fails"""

# Raised to tell the caller to ignore the package being parsed.
class SkipPackage(Exception):
    """Exception raised to skip this package"""
| 38 | |||
# Cache of file mtimes keyed by path, so dependency checks don't
# re-stat the same file over and over.
__mtime_cache = {}
def cached_mtime(f):
    """Return (and cache) the mtime of f; OSError propagates if f is missing."""
    # Bugfix/modernisation: dict.has_key() is deprecated (and removed in
    # Python 3); use the 'in' operator instead.
    if f not in __mtime_cache:
        __mtime_cache[f] = os.stat(f)[8]
    return __mtime_cache[f]
| 44 | |||
def cached_mtime_noerror(f):
    """
    Like cached_mtime(), but return 0 instead of raising when the file
    cannot be stat'ed.  The failure is not cached, so a later successful
    stat still populates the cache.
    """
    # Bugfix/modernisation: replace deprecated dict.has_key() with 'in'.
    if f not in __mtime_cache:
        try:
            __mtime_cache[f] = os.stat(f)[8]
        except OSError:
            return 0
    return __mtime_cache[f]
| 52 | |||
def mark_dependency(d, f):
    """Append (absolute path, mtime) for f to the metadata's __depends list."""
    if f.startswith('./'):
        f = "%s/%s" % (os.getcwd(), f[2:])
    filedeps = bb.data.getVar('__depends', d) or []
    filedeps.append((f, cached_mtime(f)))
    bb.data.setVar('__depends', filedeps, d)
| 59 | |||
def supports(fn, data):
    """Return 1 if some registered handler supports this file, else 0."""
    matched = 0
    for h in handlers:
        if h['supports'](fn, data):
            matched = 1
            break
    return matched
| 66 | |||
def handle(fn, data, include = 0):
    """Dispatch fn to the first handler that supports it; raise ParseError otherwise."""
    for h in handlers:
        if not h['supports'](fn, data):
            continue
        return h['handle'](fn, data, include)
    raise ParseError("%s is not a BitBake file" % fn)
| 73 | |||
def init(fn, data):
    """
    Run the init hook of the handler that supports fn.

    Bugfix: the registered 'supports' callbacks take (fn, data) — see
    supports() above — but only fn was being passed here, which raised
    a TypeError whenever init() actually ran.
    """
    for h in handlers:
        if h['supports'](fn, data):
            return h['init'](data)
| 78 | |||
| 79 | |||
| 80 | from parse_py import __version__, ConfHandler, BBHandler | ||
diff --git a/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py b/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py new file mode 100644 index 0000000000..e9b950acbd --- /dev/null +++ b/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py | |||
| @@ -0,0 +1,416 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | """ | ||
| 5 | class for handling .bb files | ||
| 6 | |||
| 7 | Reads a .bb file and obtains its metadata | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | |||
| 12 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 13 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 14 | # | ||
| 15 | # This program is free software; you can redistribute it and/or modify | ||
| 16 | # it under the terms of the GNU General Public License version 2 as | ||
| 17 | # published by the Free Software Foundation. | ||
| 18 | # | ||
| 19 | # This program is distributed in the hope that it will be useful, | ||
| 20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 22 | # GNU General Public License for more details. | ||
| 23 | # | ||
| 24 | # You should have received a copy of the GNU General Public License along | ||
| 25 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 26 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 27 | |||
| 28 | import re, bb, os, sys, time | ||
| 29 | import bb.fetch, bb.build, bb.utils | ||
| 30 | from bb import data, fetch, methodpool | ||
| 31 | |||
| 32 | from ConfHandler import include, localpath, obtain, init | ||
| 33 | from bb.parse import ParseError | ||
| 34 | |||
# Regular expressions recognising the .bb syntax: shell/python function
# definitions ("[python|fakeroot] name () {"), inherit lines,
# EXPORT_FUNCTIONS, addtask with its before/after clauses, addhandler,
# bare python "def"s and their indented body lines, and single words.
__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile("addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
__word__ = re.compile(r"\S+")

# Parser state shared between handle() and feeder():
# name of the shell/python function currently being collected ("" when
# not inside one), whether we are inside a bare python "def", the
# accumulated body lines, the current bbclass name, and the stack of
# classes being handled (None marks the outermost level).
__infunc__ = ""
__inpython__ = False
__body__ = []
__classname__ = ""
classes = [ None, ]

# We need to indicate EOF to the feeder. This code is so messy that
# factoring it out to a close_parse_file method is out of question.
# We will use the IN_PYTHON_EOF as an indicator to just close the method
#
# The two parts using it are tightly integrated anyway
IN_PYTHON_EOF = -9999999999999

# Shared dict of already-parsed modules, owned by bb.methodpool.
__parsed_methods__ = methodpool.get_parsed_dict()
| 58 | |||
def supports(fn, d):
    """Return whether fn looks like a .bb, .bbclass or .inc file."""
    localfn = localpath(fn, d)
    return localfn.endswith(".bb") or localfn.endswith(".bbclass") or localfn.endswith(".inc")
| 62 | |||
def inherit(files, d):
    # Track which class files have been inherited already, so each one
    # is only included once per recipe.
    __inherit_cache = data.getVar('__inherit_cache', d) or []
    fn = ""
    lineno = 0
    files = data.expand(files, d)
    for file in files:
        # Bare class names resolve to classes/<name>.bbclass on BBPATH.
        if file[0] != "/" and file[-8:] != ".bbclass":
            file = os.path.join('classes', '%s.bbclass' % file)

        if not file in __inherit_cache:
            bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
            __inherit_cache.append( file )
            data.setVar('__inherit_cache', __inherit_cache, d)
            include(fn, file, d, "inherit")
    # include() may itself have inherited more classes; re-read the cache.
    __inherit_cache = data.getVar('__inherit_cache', d) or []
| 78 | |||
| 79 | def handle(fn, d, include = 0): | ||
| 80 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__ | ||
| 81 | __body__ = [] | ||
| 82 | __infunc__ = "" | ||
| 83 | __classname__ = "" | ||
| 84 | __residue__ = [] | ||
| 85 | |||
| 86 | if include == 0: | ||
| 87 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)") | ||
| 88 | else: | ||
| 89 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") | ||
| 90 | |||
| 91 | (root, ext) = os.path.splitext(os.path.basename(fn)) | ||
| 92 | base_name = "%s%s" % (root,ext) | ||
| 93 | init(d) | ||
| 94 | |||
| 95 | if ext == ".bbclass": | ||
| 96 | __classname__ = root | ||
| 97 | classes.append(__classname__) | ||
| 98 | __inherit_cache = data.getVar('__inherit_cache', d) or [] | ||
| 99 | if not fn in __inherit_cache: | ||
| 100 | __inherit_cache.append(fn) | ||
| 101 | data.setVar('__inherit_cache', __inherit_cache, d) | ||
| 102 | |||
| 103 | if include != 0: | ||
| 104 | oldfile = data.getVar('FILE', d) | ||
| 105 | else: | ||
| 106 | oldfile = None | ||
| 107 | |||
| 108 | fn = obtain(fn, d) | ||
| 109 | bbpath = (data.getVar('BBPATH', d, 1) or '').split(':') | ||
| 110 | if not os.path.isabs(fn): | ||
| 111 | f = None | ||
| 112 | for p in bbpath: | ||
| 113 | j = os.path.join(p, fn) | ||
| 114 | if os.access(j, os.R_OK): | ||
| 115 | abs_fn = j | ||
| 116 | f = open(j, 'r') | ||
| 117 | break | ||
| 118 | if f is None: | ||
| 119 | raise IOError("file %s not found" % fn) | ||
| 120 | else: | ||
| 121 | f = open(fn,'r') | ||
| 122 | abs_fn = fn | ||
| 123 | |||
| 124 | if ext != ".bbclass": | ||
| 125 | dname = os.path.dirname(abs_fn) | ||
| 126 | if dname not in bbpath: | ||
| 127 | bbpath.insert(0, dname) | ||
| 128 | data.setVar('BBPATH', ":".join(bbpath), d) | ||
| 129 | |||
| 130 | if include: | ||
| 131 | bb.parse.mark_dependency(d, abs_fn) | ||
| 132 | |||
| 133 | if ext != ".bbclass": | ||
| 134 | data.setVar('FILE', fn, d) | ||
| 135 | |||
| 136 | lineno = 0 | ||
| 137 | while 1: | ||
| 138 | lineno = lineno + 1 | ||
| 139 | s = f.readline() | ||
| 140 | if not s: break | ||
| 141 | s = s.rstrip() | ||
| 142 | feeder(lineno, s, fn, base_name, d) | ||
| 143 | if __inpython__: | ||
| 144 | # add a blank line to close out any python definition | ||
| 145 | feeder(IN_PYTHON_EOF, "", fn, base_name, d) | ||
| 146 | if ext == ".bbclass": | ||
| 147 | classes.remove(__classname__) | ||
| 148 | else: | ||
| 149 | if include == 0: | ||
| 150 | data.expandKeys(d) | ||
| 151 | data.update_data(d) | ||
| 152 | anonqueue = data.getVar("__anonqueue", d, 1) or [] | ||
| 153 | body = [x['content'] for x in anonqueue] | ||
| 154 | flag = { 'python' : 1, 'func' : 1 } | ||
| 155 | data.setVar("__anonfunc", "\n".join(body), d) | ||
| 156 | data.setVarFlags("__anonfunc", flag, d) | ||
| 157 | from bb import build | ||
| 158 | try: | ||
| 159 | t = data.getVar('T', d) | ||
| 160 | data.setVar('T', '${TMPDIR}/anonfunc/', d) | ||
| 161 | build.exec_func("__anonfunc", d) | ||
| 162 | data.delVar('T', d) | ||
| 163 | if t: | ||
| 164 | data.setVar('T', t, d) | ||
| 165 | except Exception, e: | ||
| 166 | bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e) | ||
| 167 | raise | ||
| 168 | data.delVar("__anonqueue", d) | ||
| 169 | data.delVar("__anonfunc", d) | ||
| 170 | set_additional_vars(fn, d, include) | ||
| 171 | data.update_data(d) | ||
| 172 | |||
| 173 | all_handlers = {} | ||
| 174 | for var in data.getVar('__BBHANDLERS', d) or []: | ||
| 175 | # try to add the handler | ||
| 176 | handler = data.getVar(var,d) | ||
| 177 | bb.event.register(var, handler) | ||
| 178 | |||
| 179 | tasklist = data.getVar('__BBTASKS', d) or [] | ||
| 180 | bb.build.add_tasks(tasklist, d) | ||
| 181 | |||
| 182 | bbpath.pop(0) | ||
| 183 | if oldfile: | ||
| 184 | bb.data.setVar("FILE", oldfile, d) | ||
| 185 | |||
| 186 | # we have parsed the bb class now | ||
| 187 | if ext == ".bbclass" or ext == ".inc": | ||
| 188 | __parsed_methods__[base_name] = 1 | ||
| 189 | |||
| 190 | return d | ||
| 191 | |||
def feeder(lineno, s, fn, root, d):
    """
    Consume one line of a .bb/.bbclass file.

    Acts as a state machine driven by the module globals: __infunc__
    (inside a shell-style function body), __inpython__ (inside a python
    'def' block), __body__ (accumulated body lines) and __residue__
    (pending backslash-continued fragments).  Lines that match none of
    the bb-specific constructs fall through to the ConfHandler.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
    # Currently inside a function body: collect lines until the closing '}'.
    if __infunc__:
        if s == '}':
            __body__.append('')
            data.setVar(__infunc__, '\n'.join(__body__), d)
            data.setVarFlag(__infunc__, "func", 1, d)
            if __infunc__ == "__anonymous":
                # Anonymous functions are queued and executed together
                # at the end of parsing (see handle()).
                anonqueue = bb.data.getVar("__anonqueue", d) or []
                anonitem = {}
                anonitem["content"] = bb.data.getVar("__anonymous", d)
                anonitem["flags"] = bb.data.getVarFlags("__anonymous", d)
                anonqueue.append(anonitem)
                bb.data.setVar("__anonqueue", anonqueue, d)
                bb.data.delVarFlags("__anonymous", d)
                bb.data.delVar("__anonymous", d)
            __infunc__ = ""
            __body__ = []
        else:
            __body__.append(s)
        return

    # Currently inside a python 'def' block: keep collecting while the
    # line still looks like part of the function (indented/blank).
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and lineno != IN_PYTHON_EOF:
            __body__.append(s)
            return
        else:
            # Note we will add root to parsedmethods after having parse
            # 'this' file. This means we will not parse methods from
            # bb classes twice
            if not root in __parsed_methods__:
                text = '\n'.join(__body__)
                methodpool.insert_method( root, text, fn )
                funcs = data.getVar('__functions__', d) or {}
                if not funcs.has_key( root ):
                    funcs[root] = text
                else:
                    funcs[root] = "%s\n%s" % (funcs[root], text)

                data.setVar('__functions__', funcs, d)
            __body__ = []
            __inpython__ = False

            if lineno == IN_PYTHON_EOF:
                return

        # fall through: the current line still needs normal processing

    if s == '' or s[0] == '#': return          # skip comments and empty lines

    # Backslash continuation: stash the fragment and wait for more.
    if s[-1] == '\\':
        __residue__.append(s[:-1])
        return

    s = "".join(__residue__) + s
    __residue__ = []

    # Function start: "name() {", optionally prefixed with python/fakeroot.
    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = m.group("func") or "__anonymous"
        key = __infunc__
        if data.getVar(key, d):
            # clean up old version of this piece of metadata, as its
            # flags could cause problems
            data.setVarFlag(key, 'python', None, d)
            data.setVarFlag(key, 'fakeroot', None, d)
        if m.group("py") is not None:
            data.setVarFlag(key, "python", "1", d)
        else:
            data.delVarFlag(key, "python", d)
        if m.group("fr") is not None:
            data.setVarFlag(key, "fakeroot", "1", d)
        else:
            data.delVarFlag(key, "fakeroot", d)
        return

    # Plain python 'def': switch into python-collection mode.
    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = True
        return

    # EXPORT_FUNCTIONS: create class-qualified aliases so a subclass can
    # override an exported function while the base version stays callable.
    m = __export_func_regexp__.match(s)
    if m:
        fns = m.group(1)
        n = __word__.findall(fns)
        for f in n:
            allvars = []
            allvars.append(f)
            allvars.append(classes[-1] + "_" + f)

            vars = [[ allvars[0], allvars[1] ]]
            if len(classes) > 1 and classes[-2] is not None:
                # Nested class: also map through the parent class name.
                allvars.append(classes[-2] + "_" + f)
                vars = []
                vars.append([allvars[2], allvars[1]])
                vars.append([allvars[0], allvars[2]])

            for (var, calledvar) in vars:
                # Do not clobber a user-defined override of the plain name.
                if data.getVar(var, d) and not data.getVarFlag(var, 'export_func', d):
                    continue

                if data.getVar(var, d):
                    data.setVarFlag(var, 'python', None, d)
                    data.setVarFlag(var, 'func', None, d)

                # Propagate func/python flags from the real function, and
                # dirs from the alias back onto the real function.
                for flag in [ "func", "python" ]:
                    if data.getVarFlag(calledvar, flag, d):
                        data.setVarFlag(var, flag, data.getVarFlag(calledvar, flag, d), d)
                for flag in [ "dirs" ]:
                    if data.getVarFlag(var, flag, d):
                        data.setVarFlag(calledvar, flag, data.getVarFlag(var, flag, d), d)

                # The alias body simply calls the qualified function.
                if data.getVarFlag(calledvar, "python", d):
                    data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n", d)
                else:
                    data.setVar(var, "\t" + calledvar + "\n", d)
                data.setVarFlag(var, 'export_func', '1', d)

        return

    # addtask: register a task and wire up its before/after dependencies.
    m = __addtask_regexp__.match(s)
    if m:
        func = m.group("func")
        before = m.group("before")
        after = m.group("after")
        if func is None:
            return
        var = "do_" + func

        data.setVarFlag(var, "task", 1, d)

        bbtasks = data.getVar('__BBTASKS', d) or []
        if not var in bbtasks:
            bbtasks.append(var)
        data.setVar('__BBTASKS', bbtasks, d)

        existing = data.getVarFlag(var, "deps", d) or []
        if after is not None:
            # set up deps for function
            for entry in after.split():
                if entry not in existing:
                    existing.append(entry)
        data.setVarFlag(var, "deps", existing, d)
        if before is not None:
            # set up things that depend on this func
            for entry in before.split():
                existing = data.getVarFlag(entry, "deps", d) or []
                if var not in existing:
                    data.setVarFlag(entry, "deps", [var] + existing, d)
        return

    # addhandler: queue event handlers for registration in handle().
    m = __addhandler_regexp__.match(s)
    if m:
        fns = m.group(1)
        hs = __word__.findall(fns)
        bbhands = data.getVar('__BBHANDLERS', d) or []
        for h in hs:
            bbhands.append(h)
            data.setVarFlag(h, "handler", 1, d)
        data.setVar('__BBHANDLERS', bbhands, d)
        return

    # inherit: pull in the named .bbclass files.
    m = __inherit_regexp__.match(s)
    if m:

        files = m.group(1)
        n = __word__.findall(files)
        inherit(n, d)
        return

    # Anything else is handled by the configuration-file parser.
    from bb.parse import ConfHandler
    return ConfHandler.feeder(lineno, s, fn, d)
| 366 | |||
__pkgsplit_cache__ = {}
def vars_from_file(mypkg, d):
    """
    Derive default (PN, PV, PR) values from a recipe file name.

    "path/foo_1.0_r0.bb" -> ['foo', '1.0', 'r0']; missing fields are
    padded with None.  Results are memoised in __pkgsplit_cache__.

    mypkg -- recipe path/file name; falsy values yield (None, None, None)
    d     -- unused, kept for interface compatibility

    Raises ParseError when the name contains more than two underscores.
    """
    if not mypkg:
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    parts = os.path.splitext(os.path.basename(mypkg))[0].split('_')
    if len(parts) > 3:
        # Validate *before* caching: the original cached first, so a
        # repeat call with an invalid name silently returned bad data
        # instead of raising again.
        raise ParseError("Unable to generate default variables from the filename: %s (too many underscores)" % mypkg)
    # Pad to exactly three entries (PN, PV, PR).
    parts.extend([None] * (3 - len(parts)))
    __pkgsplit_cache__[mypkg] = parts
    return parts
| 386 | |||
def set_additional_vars(file, d, include):
    """Deduce rest of variables, e.g. ${A} out of ${SRC_URI}"""
    # Deliberately a no-op: nothing consumes the derived 'A' variable
    # any more, so the old SRC_URI/fetcher-based computation (building
    # 'A' from the local paths of every SRC_URI entry) was disabled.
    return
| 411 | |||
| 412 | |||
# Add us to the handlers list
# Registering here lets bb.parse dispatch any file that this module's
# supports() accepts to the handle()/init() functions defined above.
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
# Drop the borrowed name; the entry appended to bb.parse.handlers is
# what persists.
del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py b/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py new file mode 100644 index 0000000000..e6488bbe11 --- /dev/null +++ b/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py | |||
| @@ -0,0 +1,228 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | """ | ||
| 5 | class for handling configuration data files | ||
| 6 | |||
| 7 | Reads a .conf file and obtains its metadata | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 13 | # | ||
| 14 | # This program is free software; you can redistribute it and/or modify | ||
| 15 | # it under the terms of the GNU General Public License version 2 as | ||
| 16 | # published by the Free Software Foundation. | ||
| 17 | # | ||
| 18 | # This program is distributed in the hope that it will be useful, | ||
| 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | # GNU General Public License for more details. | ||
| 22 | # | ||
| 23 | # You should have received a copy of the GNU General Public License along | ||
| 24 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 25 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 26 | |||
| 27 | import re, bb.data, os, sys | ||
| 28 | from bb.parse import ParseError | ||
| 29 | |||
# Previous, simpler assignment regexp kept for reference:
#__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
# Variable assignment: optional "export" prefix, optional [flag] suffix,
# and the full operator set (:=, ?=, +=, =+, =., .=, plain =) with an
# optionally quoted value.
__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
# "include <file>" -- missing file is only a debug message.
__include_regexp__ = re.compile( r"include\s+(.+)" )
# "require <file>" -- missing file raises ParseError.
__require_regexp__ = re.compile( r"require\s+(.+)" )
# "export <var>" -- mark an already-defined variable for export.
__export_regexp__ = re.compile( r"export\s+(.+)" )
| 35 | |||
def init(data):
    """Seed TOPDIR and BBPATH with sane defaults when they are unset."""
    if not bb.data.getVar('TOPDIR', data):
        bb.data.setVar('TOPDIR', os.getcwd(), data)
    if not bb.data.getVar('BBPATH', data):
        fallback = os.path.join(sys.prefix, 'share', 'bitbake')
        bb.data.setVar('BBPATH', fallback, data)
| 41 | |||
def supports(fn, d):
    """Return True when this parser understands the file (*.conf)."""
    return localpath(fn, d).endswith(".conf")
| 44 | |||
def localpath(fn, d):
    """
    Map a (possibly remote) file name to a local path.

    Names that already exist locally, or carry no URI scheme, are
    returned untouched.  Anything else is resolved via the fetcher;
    on failure the original name is returned unchanged.
    """
    if os.path.exists(fn):
        return fn
    if "://" not in fn:
        return fn

    try:
        fetched = bb.fetch.localpath(fn, d, False)
    except bb.MalformedUrl:
        fetched = None

    return fetched or fn
| 61 | |||
def obtain(fn, data):
    """
    Fetch fn (when it is a remote URI) and return the local path.

    Every fetcher problem is logged at debug level and the local name
    is returned anyway -- obtaining a config file is best-effort.
    """
    import sys, bb
    fn = bb.data.expand(fn, data)
    localfn = bb.data.expand(localpath(fn, data), data)

    if localfn == fn:
        # Already local: nothing to download.
        return localfn

    dldir = bb.data.getVar('DL_DIR', data, 1)
    if not dldir:
        bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: DL_DIR not defined")
        return localfn
    bb.mkdirhier(dldir)

    try:
        bb.fetch.init([fn], data)
    except bb.fetch.NoMethodError:
        (type, value, traceback) = sys.exc_info()
        bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: no method: %s" % value)
        return localfn

    try:
        bb.fetch.go(data)
    except bb.fetch.MissingParameterError:
        (type, value, traceback) = sys.exc_info()
        bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: missing parameters: %s" % value)
        return localfn
    except bb.fetch.FetchError:
        (type, value, traceback) = sys.exc_info()
        bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: failed: %s" % value)
        return localfn

    return localfn
| 91 | |||
| 92 | |||
def include(oldfn, fn, data, error_out):
    """
    Parse fn as a file included from oldfn.

    oldfn     -- the file requesting the include; a file including
                 itself is silently ignored (recursion guard)
    fn        -- the file to include; variable references are expanded
    data      -- the data store to parse into
    error_out -- if false, a missing file is only logged at debug
                 level; if truthy it must be a verb phrase for the
                 error message (e.g. "include required") and a
                 ParseError is raised instead
    """
    if oldfn == fn: # prevent infinite recursion
        return None

    import bb
    fn = bb.data.expand(fn, data)
    oldfn = bb.data.expand(oldfn, data)

    from bb.parse import handle
    try:
        # handle() populates data in place; its return value is not needed.
        handle(fn, data, True)
    except IOError:
        if error_out:
            # error_out doubles as the verb in the message, e.g.
            # "Could not include required file foo.conf".
            raise ParseError("Could not %(error_out)s file %(fn)s" % vars())
        bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn)
| 112 | |||
def handle(fn, data, include = 0):
    """
    Parse configuration file fn into the data store.

    Non-absolute names are searched for along BBPATH.  include is
    non-zero for include/require, in which case FILE is restored
    afterwards and the file is recorded as a parse dependency.
    Returns the data store.  Raises IOError when the file cannot be
    found.
    """
    if include:
        inc_string = "including"
    else:
        inc_string = "reading"
    init(data)

    if include == 0:
        # Top-level config file: seed the datastore from the environment.
        bb.data.inheritFromOS(data)
        oldfile = None
    else:
        oldfile = bb.data.getVar('FILE', data)

    fn = obtain(fn, data)
    if not os.path.isabs(fn):
        f = None
        # Fall back to "" (not []) so .split(":") below cannot blow up
        # when BBPATH is unset -- the original used [] here.
        bbpath = bb.data.getVar("BBPATH", data, 1) or ""
        for p in bbpath.split(":"):
            currname = os.path.join(p, fn)
            if os.access(currname, os.R_OK):
                f = open(currname, 'r')
                abs_fn = currname
                bb.msg.debug(2, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string, currname))
                break
        if f is None:
            raise IOError("file '%s' not found" % fn)
    else:
        f = open(fn,'r')
        bb.msg.debug(1, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string,fn))
        abs_fn = fn

    if include:
        bb.parse.mark_dependency(data, abs_fn)

    lineno = 0
    bb.data.setVar('FILE', fn, data)
    while 1:
        lineno = lineno + 1
        s = f.readline()
        if not s: break
        w = s.strip()
        if not w: continue # skip empty lines
        s = s.rstrip()
        if s[0] == '#': continue # skip comments
        # Fold backslash-continued lines into one logical line.
        while s[-1] == '\\':
            s2 = f.readline()[:-1].strip()
            lineno = lineno + 1
            s = s[:-1] + s2
        feeder(lineno, s, fn, data)
    # The original leaked this file handle.
    f.close()

    if oldfile:
        bb.data.setVar('FILE', oldfile, data)
    return data
| 166 | |||
def feeder(lineno, s, fn, data):
    """
    Parse one (continuation-joined) configuration line into data.

    Handles variable assignments (including flags and the :=, ?=, +=,
    =+, =., .= operators), include/require directives and export
    markers.  Raises ParseError for anything unrecognised.
    """
    def getFunc(groupd, key, data):
        # Read the current value of either the flag being assigned or
        # the variable itself, as appropriate for this assignment.
        if 'flag' in groupd and groupd['flag'] is not None:
            return bb.data.getVarFlag(key, groupd['flag'], data)
        else:
            return bb.data.getVar(key, data)

    m = __config_regexp__.match(s)
    if m:
        groupd = m.groupdict()
        key = groupd["var"]
        if "exp" in groupd and groupd["exp"] is not None:
            bb.data.setVarFlag(key, "export", 1, data)
        if "ques" in groupd and groupd["ques"] is not None:
            # ?= assigns only when no value exists yet
            val = getFunc(groupd, key, data)
            if val is None:
                val = groupd["value"]
        elif "colon" in groupd and groupd["colon"] is not None:
            # := expands immediately against a finalised copy of the store
            e = data.createCopy()
            bb.data.update_data(e)
            val = bb.data.expand(groupd["value"], e)
        elif "append" in groupd and groupd["append"] is not None:
            # += appends with a separating space
            val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "prepend" in groupd and groupd["prepend"] is not None:
            # =+ prepends with a separating space
            val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        elif "postdot" in groupd and groupd["postdot"] is not None:
            # .= appends with no separator
            val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "predot" in groupd and groupd["predot"] is not None:
            # =. prepends with no separator
            val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        else:
            val = groupd["value"]
        if 'flag' in groupd and groupd['flag'] is not None:
            bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
            bb.data.setVarFlag(key, groupd['flag'], val, data)
        else:
            bb.data.setVar(key, val, data)
        return

    m = __include_regexp__.match(s)
    if m:
        s = bb.data.expand(m.group(1), data)
        bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
        include(fn, s, data, False)
        return

    m = __require_regexp__.match(s)
    if m:
        s = bb.data.expand(m.group(1), data)
        # A missing required file is a hard error (see include()).
        include(fn, s, data, "include required")
        return

    m = __export_regexp__.match(s)
    if m:
        bb.data.setVarFlag(m.group(1), "export", 1, data)
        return

    raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s))
| 224 | |||
# Add us to the handlers list
# Registering here lets bb.parse dispatch any file that this module's
# supports() accepts to the handle()/init() functions defined above.
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
# Drop the borrowed name; the entry appended to bb.parse.handlers is
# what persists.
del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/__init__.py b/bitbake-dev/lib/bb/parse/parse_py/__init__.py new file mode 100644 index 0000000000..9e0e00adda --- /dev/null +++ b/bitbake-dev/lib/bb/parse/parse_py/__init__.py | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | """ | ||
| 5 | BitBake Parsers | ||
| 6 | |||
| 7 | File parsers for the BitBake build tools. | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 13 | # | ||
| 14 | # This program is free software; you can redistribute it and/or modify | ||
| 15 | # it under the terms of the GNU General Public License version 2 as | ||
| 16 | # published by the Free Software Foundation. | ||
| 17 | # | ||
| 18 | # This program is distributed in the hope that it will be useful, | ||
| 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | # GNU General Public License for more details. | ||
| 22 | # | ||
| 23 | # You should have received a copy of the GNU General Public License along | ||
| 24 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 25 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 26 | # | ||
| 27 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
| 28 | __version__ = '1.0' | ||
| 29 | |||
| 30 | __all__ = [ 'ConfHandler', 'BBHandler'] | ||
| 31 | |||
| 32 | import ConfHandler | ||
| 33 | import BBHandler | ||
diff --git a/bitbake-dev/lib/bb/persist_data.py b/bitbake-dev/lib/bb/persist_data.py new file mode 100644 index 0000000000..79e7448bee --- /dev/null +++ b/bitbake-dev/lib/bb/persist_data.py | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | # BitBake Persistent Data Store | ||
| 2 | # | ||
| 3 | # Copyright (C) 2007 Richard Purdie | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify | ||
| 6 | # it under the terms of the GNU General Public License version 2 as | ||
| 7 | # published by the Free Software Foundation. | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, | ||
| 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | # GNU General Public License for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | |||
| 18 | import bb, os | ||
| 19 | |||
# sqlite3 entered the standard library in Python 2.5; on older
# interpreters fall back to the external pysqlite2 package, binding it
# under the same name.
try:
    import sqlite3
except ImportError:
    try:
        from pysqlite2 import dbapi2 as sqlite3
    except ImportError:
        bb.msg.fatal(bb.msg.domain.PersistData, "Importing sqlite3 and pysqlite2 failed, please install one of them. Python 2.5 or a 'python-pysqlite2' like package is likely to be what you need.")

# "CREATE TABLE IF NOT EXISTS" (used by PersistData below) needs the
# underlying sqlite library to be at least 3.3.0.
sqlversion = sqlite3.sqlite_version_info
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
    bb.msg.fatal(bb.msg.domain.PersistData, "sqlite3 version 3.3.0 or later is required.")
| 31 | |||
| 32 | class PersistData: | ||
| 33 | """ | ||
| 34 | BitBake Persistent Data Store | ||
| 35 | |||
| 36 | Used to store data in a central location such that other threads/tasks can | ||
| 37 | access them at some future date. | ||
| 38 | |||
| 39 | The "domain" is used as a key to isolate each data pool and in this | ||
| 40 | implementation corresponds to an SQL table. The SQL table consists of a | ||
| 41 | simple key and value pair. | ||
| 42 | |||
| 43 | Why sqlite? It handles all the locking issues for us. | ||
| 44 | """ | ||
| 45 | def __init__(self, d): | ||
| 46 | self.cachedir = bb.data.getVar("PERSISTENT_DIR", d, True) or bb.data.getVar("CACHE", d, True) | ||
| 47 | if self.cachedir in [None, '']: | ||
| 48 | bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'PERSISTENT_DIR' or 'CACHE' variable.") | ||
| 49 | try: | ||
| 50 | os.stat(self.cachedir) | ||
| 51 | except OSError: | ||
| 52 | bb.mkdirhier(self.cachedir) | ||
| 53 | |||
| 54 | self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3") | ||
| 55 | bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) | ||
| 56 | |||
| 57 | self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) | ||
| 58 | |||
| 59 | def addDomain(self, domain): | ||
| 60 | """ | ||
| 61 | Should be called before any domain is used | ||
| 62 | Creates it if it doesn't exist. | ||
| 63 | """ | ||
| 64 | self.connection.execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" % domain) | ||
| 65 | |||
| 66 | def delDomain(self, domain): | ||
| 67 | """ | ||
| 68 | Removes a domain and all the data it contains | ||
| 69 | """ | ||
| 70 | self.connection.execute("DROP TABLE IF EXISTS %s;" % domain) | ||
| 71 | |||
| 72 | def getValue(self, domain, key): | ||
| 73 | """ | ||
| 74 | Return the value of a key for a domain | ||
| 75 | """ | ||
| 76 | data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key]) | ||
| 77 | for row in data: | ||
| 78 | return row[1] | ||
| 79 | |||
| 80 | def setValue(self, domain, key, value): | ||
| 81 | """ | ||
| 82 | Sets the value of a key for a domain | ||
| 83 | """ | ||
| 84 | data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key]) | ||
| 85 | rows = 0 | ||
| 86 | for row in data: | ||
| 87 | rows = rows + 1 | ||
| 88 | if rows: | ||
| 89 | self._execute("UPDATE %s SET value=? WHERE key=?;" % domain, [value, key]) | ||
| 90 | else: | ||
| 91 | self._execute("INSERT into %s(key, value) values (?, ?);" % domain, [key, value]) | ||
| 92 | |||
| 93 | def delValue(self, domain, key): | ||
| 94 | """ | ||
| 95 | Deletes a key/value pair | ||
| 96 | """ | ||
| 97 | self._execute("DELETE from %s where key=?;" % domain, [key]) | ||
| 98 | |||
| 99 | def _execute(self, *query): | ||
| 100 | while True: | ||
| 101 | try: | ||
| 102 | self.connection.execute(*query) | ||
| 103 | return | ||
| 104 | except sqlite3.OperationalError, e: | ||
| 105 | if 'database is locked' in str(e): | ||
| 106 | continue | ||
| 107 | raise | ||
| 108 | |||
| 109 | |||
| 110 | |||
diff --git a/bitbake-dev/lib/bb/providers.py b/bitbake-dev/lib/bb/providers.py new file mode 100644 index 0000000000..0ad5876ef0 --- /dev/null +++ b/bitbake-dev/lib/bb/providers.py | |||
| @@ -0,0 +1,303 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # Copyright (C) 2003, 2004 Chris Larson | ||
| 5 | # Copyright (C) 2003, 2004 Phil Blundell | ||
| 6 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
| 7 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
| 8 | # Copyright (C) 2005 ROAD GmbH | ||
| 9 | # Copyright (C) 2006 Richard Purdie | ||
| 10 | # | ||
| 11 | # This program is free software; you can redistribute it and/or modify | ||
| 12 | # it under the terms of the GNU General Public License version 2 as | ||
| 13 | # published by the Free Software Foundation. | ||
| 14 | # | ||
| 15 | # This program is distributed in the hope that it will be useful, | ||
| 16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | # GNU General Public License for more details. | ||
| 19 | # | ||
| 20 | # You should have received a copy of the GNU General Public License along | ||
| 21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 23 | |||
| 24 | import os, re | ||
| 25 | from bb import data, utils | ||
| 26 | import bb | ||
| 27 | |||
# Raised by provider resolution when nothing in the metadata PROVIDES
# a requested build-time target.
class NoProvider(Exception):
    """Exception raised when no provider of a build dependency can be found"""
| 30 | |||
# Raised by provider resolution when nothing in the metadata RPROVIDES
# a requested runtime target.
class NoRProvider(Exception):
    """Exception raised when no provider of a runtime dependency can be found"""
| 33 | |||
| 34 | |||
def sortPriorities(pn, dataCache, pkg_pn = None):
    """
    Reorder pkg_pn by file priority and default preference.

    Returns a list of lists of file names: the first sub-list holds the
    files with the highest bbfile priority, and within each priority
    files are ordered by descending DEFAULT_PREFERENCE.
    """

    if not pkg_pn:
        pkg_pn = dataCache.pkg_pn

    files = pkg_pn[pn]
    # priorities[priority][preference] -> list of files
    priorities = {}
    for f in files:
        priority = dataCache.bbfile_priority[f]
        preference = dataCache.pkg_dp[f]
        if priority not in priorities:
            priorities[priority] = {}
        if preference not in priorities[priority]:
            priorities[priority][preference] = []
        priorities[priority][preference].append(f)

    # sorted() replaces the Python-2-only cmp-style list.sort(lambda a, b: ...)
    # calls; both keys are ints, so the default ordering is identical and
    # this also works on Python 3.
    tmp_pn = []
    for pri in sorted(priorities):
        tmp_pref = []
        for pref in sorted(priorities[pri], reverse=True):
            tmp_pref.extend(priorities[pri][pref])
        # Prepend so the highest priority ends up first.
        tmp_pn = [tmp_pref] + tmp_pn

    return tmp_pn
| 65 | |||
| 66 | |||
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns a tuple ((pe, pv, pr), filename); both elements are None
    when no PREFERRED_VERSION is set or no matching file exists.
    """

    preferred_file = None
    preferred_ver = None

    # Look up PREFERRED_VERSION_<pn> with pn-specific overrides applied.
    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "pn-%s:%s:%s" % (pn, pn, data.getVar('OVERRIDES', localdata)), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
    if preferred_v:
        # Split "epoch:version_revision" into its parts.
        # NOTE(review): the greedy (.*) appears to always swallow any
        # "_r..." suffix, so group(3) looks unreachable and preferred_r
        # stays None even for "1.0_r1" -- confirm before relying on the
        # revision part of PREFERRED_VERSION.
        m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
        if m:
            if m.group(1):
                preferred_e = int(m.group(1)[:-1])
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        # pkg_pn is priority-ordered (see sortPriorities); take the first
        # file whose (pe, pv, pr) matches, treating None as a wildcard.
        for file_set in pkg_pn:
            for f in file_set:
                pe,pv,pr = dataCache.pkg_pepvpr[f]
                if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break;
        # Build a human-readable version string for the log messages.
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = '%s:%s' % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    return (preferred_ver, preferred_file)
| 120 | |||
| 121 | |||
def findLatestProvider(pn, cfgData, dataCache, file_set):
    """
    Return the highest version of the providers in file_set.
    Take default preferences into account.

    Returns ((pe, pv, pr), filename); (None, None) for an empty file_set.
    """
    best_ver = None
    best_pri = 0
    best_file = None

    for candidate in file_set:
        ver = dataCache.pkg_pepvpr[candidate]
        pri = dataCache.pkg_dp[candidate]

        # A candidate wins if it is the first one seen, carries a higher
        # default preference, or ties on preference with a newer version.
        if best_ver is None:
            newer = True
        elif pri > best_pri:
            newer = True
        elif pri == best_pri and utils.vercmp(best_ver, ver) < 0:
            newer = True
        else:
            newer = False

        if newer:
            best_ver = ver
            best_file = candidate
            best_pri = pri

    return (best_ver, best_file)
| 140 | |||
| 141 | |||
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    If there is a PREFERRED_VERSION, find the highest-priority bbfile
    providing that version. If not, find the latest version provided by
    an bbfile in the highest-priority set.

    Returns (latest, latest_file, preferred_ver, preferred_file).
    """
    by_priority = sortPriorities(pn, dataCache, pkg_pn)

    # Highest-priority provider carrying a PREFERRED_VERSION, if any.
    pref_ver, pref_file = findPreferredProvider(pn, cfgData, dataCache, by_priority, item)
    # Newest version within the top priority group.
    latest, latest_f = findLatestProvider(pn, cfgData, dataCache, by_priority[0])

    # No preferred version found: fall back to the latest.
    if pref_file is None:
        pref_file, pref_ver = latest_f, latest

    return (latest, latest_f, pref_ver, pref_file)
| 160 | |||
| 161 | |||
def _filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results.

    Returns the eligible provider filenames, best first.  Returns an
    empty list when nothing is eligible so that callers can always
    iterate or len() the result.
    """
    eligible = []
    preferred_versions = {}
    sortpkg_pn = {}

    # The order of providers depends on the order of the files on the disk
    # up to here. Sort pkg_pn to make dependency issues reproducible rather
    # than effectively random.
    providers.sort()

    # Collate providers by PN
    pkg_pn = {}
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn not in pkg_pn:
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))

    # First add PREFERRED_VERSIONS
    for pn in pkg_pn.keys():
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])

    # Now add latest versions
    for pn in pkg_pn.keys():
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
        eligible.append(preferred_versions[pn][1])

    if len(eligible) == 0:
        bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
        # Was "return 0": an integer here made callers that iterate the
        # result (filterProviders/filterProvidersRunTime) raise TypeError.
        # An empty list is equally falsy but keeps the return type stable.
        return []

    # If pn == item, give it a slight default preference
    # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn != item:
            continue
        (newvers, fn) = preferred_versions[pn]
        if fn not in eligible:
            continue
        # Move the self-named provider to the front.
        eligible.remove(fn)
        eligible = [fn] + eligible

    return eligible
| 217 | |||
| 218 | |||
def filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "normal" target item

    Returns (eligible, foundUnique) where foundUnique is True when a
    PREFERRED_PROVIDER matched and was moved to the front.
    """
    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # Record any PREFERRED_PROVIDER_<item> setting in the cache.
    prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, cfgData, 1)
    if prefervar:
        dataCache.preferred[item] = prefervar

    foundUnique = False
    if item in dataCache.preferred:
        wanted = dataCache.preferred[item]
        for p in eligible:
            if dataCache.pkg_fn[p] == wanted:
                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (wanted, item))
                # Promote the preferred provider to the head of the list.
                eligible.remove(p)
                eligible = [p] + eligible
                foundUnique = True
                break

    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, foundUnique
| 246 | |||
def filterProvidersRunTime(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "runtime" target item

    Returns (eligible, numberPreferred); more than one preferred
    provider indicates conflicting PREFERRED_PROVIDERS entries.
    """
    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # Should use dataCache.preferred here?
    preferred = []
    preferred_vars = []
    # Iterate over a snapshot: the body removes entries from 'eligible'
    # and rebinds it, and the inner 'break' only leaves the provides
    # loop -- mutating the list being iterated made the outer loop
    # silently skip the provider following each preferred one.
    for p in list(eligible):
        pn = dataCache.pkg_fn[p]
        provides = dataCache.pn_provides[pn]
        for provide in provides:
            prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
            if prefervar == pn:
                var = "PREFERRED_PROVIDERS_%s = %s" % (provide, prefervar)
                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var))
                preferred_vars.append(var)
                # Promote the preferred provider to the head of the list.
                eligible.remove(p)
                eligible = [p] + eligible
                preferred.append(p)
                break

    numberPreferred = len(preferred)

    if numberPreferred > 1:
        bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDERS entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependecy %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars))

    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, numberPreferred
| 281 | |||
def getRuntimeProviders(dataCache, rdepend):
    """
    Return any providers of runtime dependency

    Checks the explicit RPROVIDES and PACKAGES maps first; falls back to
    the PACKAGES_DYNAMIC regex patterns only when both come up empty.
    """
    rproviders = []

    if rdepend in dataCache.rproviders:
        rproviders.extend(dataCache.rproviders[rdepend])

    if rdepend in dataCache.packages:
        rproviders.extend(dataCache.packages[rdepend])

    if rproviders:
        return rproviders

    # Only search dynamic packages if we can't find anything in other variables
    for pattern in dataCache.packages_dynamic:
        if re.compile(pattern).match(rdepend):
            rproviders.extend(dataCache.packages_dynamic[pattern])

    return rproviders
diff --git a/bitbake-dev/lib/bb/runqueue.py b/bitbake-dev/lib/bb/runqueue.py new file mode 100644 index 0000000000..4130b50641 --- /dev/null +++ b/bitbake-dev/lib/bb/runqueue.py | |||
| @@ -0,0 +1,1157 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | """ | ||
| 5 | BitBake 'RunQueue' implementation | ||
| 6 | |||
| 7 | Handles preparation and execution of a queue of tasks | ||
| 8 | """ | ||
| 9 | |||
| 10 | # Copyright (C) 2006-2007 Richard Purdie | ||
| 11 | # | ||
| 12 | # This program is free software; you can redistribute it and/or modify | ||
| 13 | # it under the terms of the GNU General Public License version 2 as | ||
| 14 | # published by the Free Software Foundation. | ||
| 15 | # | ||
| 16 | # This program is distributed in the hope that it will be useful, | ||
| 17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | # GNU General Public License for more details. | ||
| 20 | # | ||
| 21 | # You should have received a copy of the GNU General Public License along | ||
| 22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 24 | |||
| 25 | from bb import msg, data, event, mkdirhier, utils | ||
| 26 | from sets import Set | ||
| 27 | import bb, os, sys | ||
| 28 | import signal | ||
| 29 | import stat | ||
| 30 | |||
class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""
    def __init__(self, x):
        # Store the payload directly in Exception.args.
        # NOTE(review): BaseException coerces args to a tuple, so a plain
        # string here becomes a tuple of characters -- confirm callers
        # always pass a tuple/list.
        self.args = x
| 36 | |||
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # All counters start at zero; 'total' is the overall task count.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def taskFailed(self):
        # A failed task is no longer active.
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        # Note: unlike taskCompleted/taskFailed this *increments* active;
        # a skipped task is accounted as active here and retired elsewhere.
        self.active += number
        self.skipped += number

    def taskActive(self):
        self.active += 1
# These values indicate the next step due to be run in the
# runQueue state machine. runQueuePrepare is the initial state
# (assigned by RunQueue.reset_runqueue()).
runQueuePrepare = 2
runQueueRunInit = 3
runQueueRunning = 4
runQueueFailed = 6
runQueueCleanUp = 7
runQueueComplete = 8
runQueueChildProcess = 9
| 72 | |||
class RunQueueScheduler:
    """
    Control the order tasks are scheduled in.
    """
    def __init__(self, runqueue):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        # Identity mapping: task ids in their natural order.
        self.prio_map = list(range(len(self.rq.runq_fnid)))

    def next(self):
        """
        Return the id of the first task we find that is buildable
        """
        for candidate in self.prio_map:
            if self.rq.runq_running[candidate] == 1:
                continue
            if self.rq.runq_buildable[candidate] == 1:
                return candidate
| 98 | |||
| 99 | class RunQueueSchedulerSpeed(RunQueueScheduler): | ||
| 100 | """ | ||
| 101 | A scheduler optimised for speed. The priority map is sorted by task weight, | ||
| 102 | heavier weighted tasks (tasks needed by the most other tasks) are run first. | ||
| 103 | """ | ||
| 104 | def __init__(self, runqueue): | ||
| 105 | """ | ||
| 106 | The priority map is sorted by task weight. | ||
| 107 | """ | ||
| 108 | from copy import deepcopy | ||
| 109 | |||
| 110 | self.rq = runqueue | ||
| 111 | |||
| 112 | sortweight = deepcopy(self.rq.runq_weight) | ||
| 113 | sortweight.sort() | ||
| 114 | copyweight = deepcopy(self.rq.runq_weight) | ||
| 115 | self.prio_map = [] | ||
| 116 | |||
| 117 | for weight in sortweight: | ||
| 118 | idx = copyweight.index(weight) | ||
| 119 | self.prio_map.append(idx) | ||
| 120 | copyweight[idx] = -1 | ||
| 121 | |||
| 122 | self.prio_map.reverse() | ||
| 123 | |||
| 124 | class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): | ||
| 125 | """ | ||
| 126 | A scheduler optimised to complete .bb files are quickly as possible. The | ||
| 127 | priority map is sorted by task weight, but then reordered so once a given | ||
| 128 | .bb file starts to build, its completed as quickly as possible. This works | ||
| 129 | well where disk space is at a premium and classes like OE's rm_work are in | ||
| 130 | force. | ||
| 131 | """ | ||
| 132 | def __init__(self, runqueue): | ||
| 133 | RunQueueSchedulerSpeed.__init__(self, runqueue) | ||
| 134 | from copy import deepcopy | ||
| 135 | |||
| 136 | #FIXME - whilst this groups all fnids together it does not reorder the | ||
| 137 | #fnid groups optimally. | ||
| 138 | |||
| 139 | basemap = deepcopy(self.prio_map) | ||
| 140 | self.prio_map = [] | ||
| 141 | while (len(basemap) > 0): | ||
| 142 | entry = basemap.pop(0) | ||
| 143 | self.prio_map.append(entry) | ||
| 144 | fnid = self.rq.runq_fnid[entry] | ||
| 145 | todel = [] | ||
| 146 | for entry in basemap: | ||
| 147 | entry_fnid = self.rq.runq_fnid[entry] | ||
| 148 | if entry_fnid == fnid: | ||
| 149 | todel.append(basemap.index(entry)) | ||
| 150 | self.prio_map.append(entry) | ||
| 151 | todel.reverse() | ||
| 152 | for idx in todel: | ||
| 153 | del basemap[idx] | ||
| 154 | |||
| 155 | class RunQueue: | ||
| 156 | """ | ||
| 157 | BitBake Run Queue implementation | ||
| 158 | """ | ||
| 159 | def __init__(self, cooker, cfgData, dataCache, taskData, targets): | ||
| 160 | self.reset_runqueue() | ||
| 161 | self.cooker = cooker | ||
| 162 | self.dataCache = dataCache | ||
| 163 | self.taskData = taskData | ||
| 164 | self.cfgData = cfgData | ||
| 165 | self.targets = targets | ||
| 166 | |||
| 167 | self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1) | ||
| 168 | self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split() | ||
| 169 | self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed" | ||
| 170 | self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile" | ||
| 171 | self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or "" | ||
| 172 | |||
| 173 | def reset_runqueue(self): | ||
| 174 | self.runq_fnid = [] | ||
| 175 | self.runq_task = [] | ||
| 176 | self.runq_depends = [] | ||
| 177 | self.runq_revdeps = [] | ||
| 178 | |||
| 179 | self.state = runQueuePrepare | ||
| 180 | |||
| 181 | def get_user_idstring(self, task): | ||
| 182 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 183 | taskname = self.runq_task[task] | ||
| 184 | return "%s, %s" % (fn, taskname) | ||
| 185 | |||
| 186 | def get_task_id(self, fnid, taskname): | ||
| 187 | for listid in range(len(self.runq_fnid)): | ||
| 188 | if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname: | ||
| 189 | return listid | ||
| 190 | return None | ||
| 191 | |||
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 distinct
        dependency loops reachable from 'tasks'.
        """
        from copy import deepcopy

        # Chains already reported (each normalised to start at its lowest id).
        valid_chains = []
        # Cache: taskid -> all reverse-dependency ids reachable from it.
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            # Depth-first walk along reverse dependencies; a revdep already
            # on the current path means we have closed a loop.
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        # NOTE(review): this returns only from the current
                        # recursion level; outer find_chains calls continue,
                        # so the search is not fully aborted here.
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                # Only recurse when the revdep (or something on the current
                # path) hasn't already been fully explored.
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            # Record everything reachable from taskid for later pruning.
            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
| 278 | |||
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that its not
        possible to execute due to circular dependencies.

        endpoints: task ids with no reverse dependencies; weights are
        seeded there and propagated backwards along runq_depends.
        Returns the per-task weight list; calls bb.msg.fatal() if
        unbuildable (circular) tasks are detected.
        """

        numTasks = len(self.runq_fnid)
        weight = []
        # Number of not-yet-processed reverse dependencies per task.
        deps_left = []
        task_done = []

        for listid in range(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        # Endpoints have nothing depending on them: seed weight 1 there.
        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        # Propagate weights backwards: a task's weight accumulates the
        # weight of everything that (transitively) depends on it. A task
        # becomes a new endpoint once all its reverse deps are processed.
        while 1:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        # Any task never marked done (or with reverse deps outstanding)
        # was unreachable from the endpoints, i.e. sits on a cycle.
        problem_tasks = []
        for task in range(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            bb.msg.error(bb.msg.domain.RunQueue, message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal(bb.msg.domain.RunQueue, message)

        return weight
| 337 | |||
| 338 | def prepare_runqueue(self): | ||
| 339 | """ | ||
| 340 | Turn a set of taskData into a RunQueue and compute data needed | ||
| 341 | to optimise the execution order. | ||
| 342 | """ | ||
| 343 | |||
| 344 | depends = [] | ||
| 345 | runq_build = [] | ||
| 346 | recursive_tdepends = {} | ||
| 347 | |||
| 348 | taskData = self.taskData | ||
| 349 | |||
| 350 | if len(taskData.tasks_name) == 0: | ||
| 351 | # Nothing to do | ||
| 352 | return | ||
| 353 | |||
| 354 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") | ||
| 355 | |||
| 356 | # Step A - Work out a list of tasks to run | ||
| 357 | # | ||
| 358 | # Taskdata gives us a list of possible providers for a every target | ||
| 359 | # ordered by priority (build_targets, run_targets). It also gives | ||
| 360 | # information on each of those providers. | ||
| 361 | # | ||
| 362 | # To create the actual list of tasks to execute we fix the list of | ||
| 363 | # providers and then resolve the dependencies into task IDs. This | ||
| 364 | # process is repeated for each type of dependency (tdepends, deptask, | ||
| 365 | # rdeptast, recrdeptask, idepends). | ||
| 366 | |||
| 367 | for task in range(len(taskData.tasks_name)): | ||
| 368 | fnid = taskData.tasks_fnid[task] | ||
| 369 | fn = taskData.fn_index[fnid] | ||
| 370 | task_deps = self.dataCache.task_deps[fn] | ||
| 371 | |||
| 372 | if fnid not in taskData.failed_fnids: | ||
| 373 | |||
| 374 | # Resolve task internal dependencies | ||
| 375 | # | ||
| 376 | # e.g. addtask before X after Y | ||
| 377 | depends = taskData.tasks_tdepends[task] | ||
| 378 | |||
| 379 | # Resolve 'deptask' dependencies | ||
| 380 | # | ||
| 381 | # e.g. do_sometask[deptask] = "do_someothertask" | ||
| 382 | # (makes sure sometask runs after someothertask of all DEPENDS) | ||
| 383 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: | ||
| 384 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() | ||
| 385 | for depid in taskData.depids[fnid]: | ||
| 386 | # Won't be in build_targets if ASSUME_PROVIDED | ||
| 387 | if depid in taskData.build_targets: | ||
| 388 | depdata = taskData.build_targets[depid][0] | ||
| 389 | if depdata is not None: | ||
| 390 | dep = taskData.fn_index[depdata] | ||
| 391 | for taskname in tasknames: | ||
| 392 | depends.append(taskData.gettask_id(dep, taskname)) | ||
| 393 | |||
| 394 | # Resolve 'rdeptask' dependencies | ||
| 395 | # | ||
| 396 | # e.g. do_sometask[rdeptask] = "do_someothertask" | ||
| 397 | # (makes sure sometask runs after someothertask of all RDEPENDS) | ||
| 398 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: | ||
| 399 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] | ||
| 400 | for depid in taskData.rdepids[fnid]: | ||
| 401 | if depid in taskData.run_targets: | ||
| 402 | depdata = taskData.run_targets[depid][0] | ||
| 403 | if depdata is not None: | ||
| 404 | dep = taskData.fn_index[depdata] | ||
| 405 | depends.append(taskData.gettask_id(dep, taskname)) | ||
| 406 | |||
| 407 | # Resolve inter-task dependencies | ||
| 408 | # | ||
| 409 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | ||
| 410 | # (makes sure sometask runs after targetname's someothertask) | ||
| 411 | idepends = taskData.tasks_idepends[task] | ||
| 412 | for (depid, idependtask) in idepends: | ||
| 413 | if depid in taskData.build_targets: | ||
| 414 | # Won't be in build_targets if ASSUME_PROVIDED | ||
| 415 | depdata = taskData.build_targets[depid][0] | ||
| 416 | if depdata is not None: | ||
| 417 | dep = taskData.fn_index[depdata] | ||
| 418 | depends.append(taskData.gettask_id(dep, idependtask)) | ||
| 419 | |||
| 420 | # Create a list of recursive dependent tasks (from tdepends) and cache | ||
| 421 | def get_recursive_tdepends(task): | ||
| 422 | if not task: | ||
| 423 | return [] | ||
| 424 | if task in recursive_tdepends: | ||
| 425 | return recursive_tdepends[task] | ||
| 426 | |||
| 427 | fnid = taskData.tasks_fnid[task] | ||
| 428 | taskids = taskData.gettask_ids(fnid) | ||
| 429 | |||
| 430 | rectdepends = taskids | ||
| 431 | nextdeps = taskids | ||
| 432 | while len(nextdeps) != 0: | ||
| 433 | newdeps = [] | ||
| 434 | for nextdep in nextdeps: | ||
| 435 | for tdepend in taskData.tasks_tdepends[nextdep]: | ||
| 436 | if tdepend not in rectdepends: | ||
| 437 | rectdepends.append(tdepend) | ||
| 438 | newdeps.append(tdepend) | ||
| 439 | nextdeps = newdeps | ||
| 440 | recursive_tdepends[task] = rectdepends | ||
| 441 | return rectdepends | ||
| 442 | |||
| 443 | # Using the list of tdepends for this task create a list of | ||
| 444 | # the recursive idepends we have | ||
| 445 | def get_recursive_idepends(task): | ||
| 446 | if not task: | ||
| 447 | return [] | ||
| 448 | rectdepends = get_recursive_tdepends(task) | ||
| 449 | |||
| 450 | recidepends = [] | ||
| 451 | for tdepend in rectdepends: | ||
| 452 | for idepend in taskData.tasks_idepends[tdepend]: | ||
| 453 | recidepends.append(idepend) | ||
| 454 | return recidepends | ||
| 455 | |||
| 456 | def add_recursive_build(depid, depfnid): | ||
| 457 | """ | ||
| 458 | Add build depends of depid to depends | ||
| 459 | (if we've not see it before) | ||
| 460 | (calls itself recursively) | ||
| 461 | """ | ||
| 462 | if str(depid) in dep_seen: | ||
| 463 | return | ||
| 464 | dep_seen.append(depid) | ||
| 465 | if depid in taskData.build_targets: | ||
| 466 | depdata = taskData.build_targets[depid][0] | ||
| 467 | if depdata is not None: | ||
| 468 | dep = taskData.fn_index[depdata] | ||
| 469 | # Need to avoid creating new tasks here | ||
| 470 | taskid = taskData.gettask_id(dep, taskname, False) | ||
| 471 | if taskid is not None: | ||
| 472 | depends.append(taskid) | ||
| 473 | fnid = taskData.tasks_fnid[taskid] | ||
| 474 | #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid]) | ||
| 475 | else: | ||
| 476 | fnid = taskData.getfn_id(dep) | ||
| 477 | for nextdepid in taskData.depids[fnid]: | ||
| 478 | if nextdepid not in dep_seen: | ||
| 479 | add_recursive_build(nextdepid, fnid) | ||
| 480 | for nextdepid in taskData.rdepids[fnid]: | ||
| 481 | if nextdepid not in rdep_seen: | ||
| 482 | add_recursive_run(nextdepid, fnid) | ||
| 483 | for (idependid, idependtask) in get_recursive_idepends(taskid): | ||
| 484 | if idependid not in dep_seen: | ||
| 485 | add_recursive_build(idependid, fnid) | ||
| 486 | |||
| 487 | def add_recursive_run(rdepid, depfnid): | ||
| 488 | """ | ||
| 489 | Add runtime depends of rdepid to depends | ||
| 490 | (if we've not see it before) | ||
| 491 | (calls itself recursively) | ||
| 492 | """ | ||
| 493 | if str(rdepid) in rdep_seen: | ||
| 494 | return | ||
| 495 | rdep_seen.append(rdepid) | ||
| 496 | if rdepid in taskData.run_targets: | ||
| 497 | depdata = taskData.run_targets[rdepid][0] | ||
| 498 | if depdata is not None: | ||
| 499 | dep = taskData.fn_index[depdata] | ||
| 500 | # Need to avoid creating new tasks here | ||
| 501 | taskid = taskData.gettask_id(dep, taskname, False) | ||
| 502 | if taskid is not None: | ||
| 503 | depends.append(taskid) | ||
| 504 | fnid = taskData.tasks_fnid[taskid] | ||
| 505 | #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid]) | ||
| 506 | else: | ||
| 507 | fnid = taskData.getfn_id(dep) | ||
| 508 | for nextdepid in taskData.depids[fnid]: | ||
| 509 | if nextdepid not in dep_seen: | ||
| 510 | add_recursive_build(nextdepid, fnid) | ||
| 511 | for nextdepid in taskData.rdepids[fnid]: | ||
| 512 | if nextdepid not in rdep_seen: | ||
| 513 | add_recursive_run(nextdepid, fnid) | ||
| 514 | for (idependid, idependtask) in get_recursive_idepends(taskid): | ||
| 515 | if idependid not in dep_seen: | ||
| 516 | add_recursive_build(idependid, fnid) | ||
| 517 | |||
| 518 | # Resolve recursive 'recrdeptask' dependencies | ||
| 519 | # | ||
| 520 | # e.g. do_sometask[recrdeptask] = "do_someothertask" | ||
| 521 | # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) | ||
| 522 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: | ||
| 523 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): | ||
| 524 | dep_seen = [] | ||
| 525 | rdep_seen = [] | ||
| 526 | idep_seen = [] | ||
| 527 | for depid in taskData.depids[fnid]: | ||
| 528 | add_recursive_build(depid, fnid) | ||
| 529 | for rdepid in taskData.rdepids[fnid]: | ||
| 530 | add_recursive_run(rdepid, fnid) | ||
| 531 | deptaskid = taskData.gettask_id(fn, taskname, False) | ||
| 532 | for (idependid, idependtask) in get_recursive_idepends(deptaskid): | ||
| 533 | add_recursive_build(idependid, fnid) | ||
| 534 | |||
| 535 | # Rmove all self references | ||
| 536 | if task in depends: | ||
| 537 | newdep = [] | ||
| 538 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) | ||
| 539 | for dep in depends: | ||
| 540 | if task != dep: | ||
| 541 | newdep.append(dep) | ||
| 542 | depends = newdep | ||
| 543 | |||
| 544 | |||
| 545 | self.runq_fnid.append(taskData.tasks_fnid[task]) | ||
| 546 | self.runq_task.append(taskData.tasks_name[task]) | ||
| 547 | self.runq_depends.append(Set(depends)) | ||
| 548 | self.runq_revdeps.append(Set()) | ||
| 549 | |||
| 550 | runq_build.append(0) | ||
| 551 | |||
| 552 | # Step B - Mark all active tasks | ||
| 553 | # | ||
| 554 | # Start with the tasks we were asked to run and mark all dependencies | ||
| 555 | # as active too. If the task is to be 'forced', clear its stamp. Once | ||
| 556 | # all active tasks are marked, prune the ones we don't need. | ||
| 557 | |||
| 558 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") | ||
| 559 | |||
| 560 | def mark_active(listid, depth): | ||
| 561 | """ | ||
| 562 | Mark an item as active along with its depends | ||
| 563 | (calls itself recursively) | ||
| 564 | """ | ||
| 565 | |||
| 566 | if runq_build[listid] == 1: | ||
| 567 | return | ||
| 568 | |||
| 569 | runq_build[listid] = 1 | ||
| 570 | |||
| 571 | depends = self.runq_depends[listid] | ||
| 572 | for depend in depends: | ||
| 573 | mark_active(depend, depth+1) | ||
| 574 | |||
| 575 | self.target_pairs = [] | ||
| 576 | for target in self.targets: | ||
| 577 | targetid = taskData.getbuild_id(target[0]) | ||
| 578 | |||
| 579 | if targetid not in taskData.build_targets: | ||
| 580 | continue | ||
| 581 | |||
| 582 | if targetid in taskData.failed_deps: | ||
| 583 | continue | ||
| 584 | |||
| 585 | fnid = taskData.build_targets[targetid][0] | ||
| 586 | fn = taskData.fn_index[fnid] | ||
| 587 | self.target_pairs.append((fn, target[1])) | ||
| 588 | |||
| 589 | # Remove stamps for targets if force mode active | ||
| 590 | if self.cooker.configuration.force: | ||
| 591 | bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn)) | ||
| 592 | bb.build.del_stamp(target[1], self.dataCache, fn) | ||
| 593 | |||
| 594 | if fnid in taskData.failed_fnids: | ||
| 595 | continue | ||
| 596 | |||
| 597 | if target[1] not in taskData.tasks_lookup[fnid]: | ||
| 598 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0])) | ||
| 599 | |||
| 600 | listid = taskData.tasks_lookup[fnid][target[1]] | ||
| 601 | |||
| 602 | mark_active(listid, 1) | ||
| 603 | |||
| 604 | # Step C - Prune all inactive tasks | ||
| 605 | # | ||
| 606 | # Once all active tasks are marked, prune the ones we don't need. | ||
| 607 | |||
| 608 | maps = [] | ||
| 609 | delcount = 0 | ||
| 610 | for listid in range(len(self.runq_fnid)): | ||
| 611 | if runq_build[listid-delcount] == 1: | ||
| 612 | maps.append(listid-delcount) | ||
| 613 | else: | ||
| 614 | del self.runq_fnid[listid-delcount] | ||
| 615 | del self.runq_task[listid-delcount] | ||
| 616 | del self.runq_depends[listid-delcount] | ||
| 617 | del runq_build[listid-delcount] | ||
| 618 | del self.runq_revdeps[listid-delcount] | ||
| 619 | delcount = delcount + 1 | ||
| 620 | maps.append(-1) | ||
| 621 | |||
| 622 | # | ||
| 623 | # Step D - Sanity checks and computation | ||
| 624 | # | ||
| 625 | |||
| 626 | # Check to make sure we still have tasks to run | ||
| 627 | if len(self.runq_fnid) == 0: | ||
| 628 | if not taskData.abort: | ||
| 629 | bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | ||
| 630 | else: | ||
| 631 | bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.") | ||
| 632 | |||
| 633 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) | ||
| 634 | |||
| 635 | # Remap the dependencies to account for the deleted tasks | ||
| 636 | # Check we didn't delete a task we depend on | ||
| 637 | for listid in range(len(self.runq_fnid)): | ||
| 638 | newdeps = [] | ||
| 639 | origdeps = self.runq_depends[listid] | ||
| 640 | for origdep in origdeps: | ||
| 641 | if maps[origdep] == -1: | ||
| 642 | bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!") | ||
| 643 | newdeps.append(maps[origdep]) | ||
| 644 | self.runq_depends[listid] = Set(newdeps) | ||
| 645 | |||
| 646 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") | ||
| 647 | |||
| 648 | # Generate a list of reverse dependencies to ease future calculations | ||
| 649 | for listid in range(len(self.runq_fnid)): | ||
| 650 | for dep in self.runq_depends[listid]: | ||
| 651 | self.runq_revdeps[dep].add(listid) | ||
| 652 | |||
| 653 | # Identify tasks at the end of dependency chains | ||
| 654 | # Error on circular dependency loops (length two) | ||
| 655 | endpoints = [] | ||
| 656 | for listid in range(len(self.runq_fnid)): | ||
| 657 | revdeps = self.runq_revdeps[listid] | ||
| 658 | if len(revdeps) == 0: | ||
| 659 | endpoints.append(listid) | ||
| 660 | for dep in revdeps: | ||
| 661 | if dep in self.runq_depends[listid]: | ||
| 662 | #self.dump_data(taskData) | ||
| 663 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) | ||
| 664 | |||
| 665 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) | ||
| 666 | |||
| 667 | # Calculate task weights | ||
| 668 | # Check of higher length circular dependencies | ||
| 669 | self.runq_weight = self.calculate_task_weights(endpoints) | ||
| 670 | |||
| 671 | # Decide what order to execute the tasks in, pick a scheduler | ||
| 672 | #self.sched = RunQueueScheduler(self) | ||
| 673 | if self.scheduler == "completion": | ||
| 674 | self.sched = RunQueueSchedulerCompletion(self) | ||
| 675 | else: | ||
| 676 | self.sched = RunQueueSchedulerSpeed(self) | ||
| 677 | |||
| 678 | # Sanity Check - Check for multiple tasks building the same provider | ||
| 679 | prov_list = {} | ||
| 680 | seen_fn = [] | ||
| 681 | for task in range(len(self.runq_fnid)): | ||
| 682 | fn = taskData.fn_index[self.runq_fnid[task]] | ||
| 683 | if fn in seen_fn: | ||
| 684 | continue | ||
| 685 | seen_fn.append(fn) | ||
| 686 | for prov in self.dataCache.fn_provides[fn]: | ||
| 687 | if prov not in prov_list: | ||
| 688 | prov_list[prov] = [fn] | ||
| 689 | elif fn not in prov_list[prov]: | ||
| 690 | prov_list[prov].append(fn) | ||
| 691 | error = False | ||
| 692 | for prov in prov_list: | ||
| 693 | if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist: | ||
| 694 | error = True | ||
| 695 | bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov]))) | ||
| 696 | #if error: | ||
| 697 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") | ||
| 698 | |||
| 699 | |||
| 700 | # Create a whitelist usable by the stamp checks | ||
| 701 | stampfnwhitelist = [] | ||
| 702 | for entry in self.stampwhitelist.split(): | ||
| 703 | entryid = self.taskData.getbuild_id(entry) | ||
| 704 | if entryid not in self.taskData.build_targets: | ||
| 705 | continue | ||
| 706 | fnid = self.taskData.build_targets[entryid][0] | ||
| 707 | fn = self.taskData.fn_index[fnid] | ||
| 708 | stampfnwhitelist.append(fn) | ||
| 709 | self.stampfnwhitelist = stampfnwhitelist | ||
| 710 | |||
| 711 | #self.dump_data(taskData) | ||
| 712 | |||
| 713 | self.state = runQueueRunInit | ||
| 714 | |||
| 715 | def check_stamps(self): | ||
| 716 | unchecked = {} | ||
| 717 | current = [] | ||
| 718 | notcurrent = [] | ||
| 719 | buildable = [] | ||
| 720 | |||
| 721 | if self.stamppolicy == "perfile": | ||
| 722 | fulldeptree = False | ||
| 723 | else: | ||
| 724 | fulldeptree = True | ||
| 725 | stampwhitelist = [] | ||
| 726 | if self.stamppolicy == "whitelist": | ||
| 727 | stampwhitelist = self.self.stampfnwhitelist | ||
| 728 | |||
| 729 | for task in range(len(self.runq_fnid)): | ||
| 730 | unchecked[task] = "" | ||
| 731 | if len(self.runq_depends[task]) == 0: | ||
| 732 | buildable.append(task) | ||
| 733 | |||
| 734 | def check_buildable(self, task, buildable): | ||
| 735 | for revdep in self.runq_revdeps[task]: | ||
| 736 | alldeps = 1 | ||
| 737 | for dep in self.runq_depends[revdep]: | ||
| 738 | if dep in unchecked: | ||
| 739 | alldeps = 0 | ||
| 740 | if alldeps == 1: | ||
| 741 | if revdep in unchecked: | ||
| 742 | buildable.append(revdep) | ||
| 743 | |||
| 744 | for task in range(len(self.runq_fnid)): | ||
| 745 | if task not in unchecked: | ||
| 746 | continue | ||
| 747 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 748 | taskname = self.runq_task[task] | ||
| 749 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
| 750 | # If the stamp is missing its not current | ||
| 751 | if not os.access(stampfile, os.F_OK): | ||
| 752 | del unchecked[task] | ||
| 753 | notcurrent.append(task) | ||
| 754 | check_buildable(self, task, buildable) | ||
| 755 | continue | ||
| 756 | # If its a 'nostamp' task, it's not current | ||
| 757 | taskdep = self.dataCache.task_deps[fn] | ||
| 758 | if 'nostamp' in taskdep and task in taskdep['nostamp']: | ||
| 759 | del unchecked[task] | ||
| 760 | notcurrent.append(task) | ||
| 761 | check_buildable(self, task, buildable) | ||
| 762 | continue | ||
| 763 | |||
| 764 | while (len(buildable) > 0): | ||
| 765 | nextbuildable = [] | ||
| 766 | for task in buildable: | ||
| 767 | if task in unchecked: | ||
| 768 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 769 | taskname = self.runq_task[task] | ||
| 770 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
| 771 | iscurrent = True | ||
| 772 | |||
| 773 | t1 = os.stat(stampfile)[stat.ST_MTIME] | ||
| 774 | for dep in self.runq_depends[task]: | ||
| 775 | if iscurrent: | ||
| 776 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | ||
| 777 | taskname2 = self.runq_task[dep] | ||
| 778 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | ||
| 779 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | ||
| 780 | if dep in notcurrent: | ||
| 781 | iscurrent = False | ||
| 782 | else: | ||
| 783 | t2 = os.stat(stampfile2)[stat.ST_MTIME] | ||
| 784 | if t1 < t2: | ||
| 785 | iscurrent = False | ||
| 786 | del unchecked[task] | ||
| 787 | if iscurrent: | ||
| 788 | current.append(task) | ||
| 789 | else: | ||
| 790 | notcurrent.append(task) | ||
| 791 | |||
| 792 | check_buildable(self, task, nextbuildable) | ||
| 793 | |||
| 794 | buildable = nextbuildable | ||
| 795 | |||
| 796 | #for task in range(len(self.runq_fnid)): | ||
| 797 | # fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 798 | # taskname = self.runq_task[task] | ||
| 799 | # print "%s %s.%s" % (task, taskname, fn) | ||
| 800 | |||
| 801 | #print "Unchecked: %s" % unchecked | ||
| 802 | #print "Current: %s" % current | ||
| 803 | #print "Not current: %s" % notcurrent | ||
| 804 | |||
| 805 | if len(unchecked) > 0: | ||
| 806 | bb.fatal("check_stamps fatal internal error") | ||
| 807 | return current | ||
| 808 | |||
| 809 | def check_stamp_task(self, task): | ||
| 810 | |||
| 811 | if self.stamppolicy == "perfile": | ||
| 812 | fulldeptree = False | ||
| 813 | else: | ||
| 814 | fulldeptree = True | ||
| 815 | stampwhitelist = [] | ||
| 816 | if self.stamppolicy == "whitelist": | ||
| 817 | stampwhitelist = self.stampfnwhitelist | ||
| 818 | |||
| 819 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 820 | taskname = self.runq_task[task] | ||
| 821 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
| 822 | # If the stamp is missing its not current | ||
| 823 | if not os.access(stampfile, os.F_OK): | ||
| 824 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile) | ||
| 825 | return False | ||
| 826 | # If its a 'nostamp' task, it's not current | ||
| 827 | taskdep = self.dataCache.task_deps[fn] | ||
| 828 | if 'nostamp' in taskdep and task in taskdep['nostamp']: | ||
| 829 | bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname)) | ||
| 830 | return False | ||
| 831 | |||
| 832 | iscurrent = True | ||
| 833 | t1 = os.stat(stampfile)[stat.ST_MTIME] | ||
| 834 | for dep in self.runq_depends[task]: | ||
| 835 | if iscurrent: | ||
| 836 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | ||
| 837 | taskname2 = self.runq_task[dep] | ||
| 838 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | ||
| 839 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | ||
| 840 | try: | ||
| 841 | t2 = os.stat(stampfile2)[stat.ST_MTIME] | ||
| 842 | if t1 < t2: | ||
| 843 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile,stampfile2)) | ||
| 844 | iscurrent = False | ||
| 845 | except: | ||
| 846 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 ,stampfile)) | ||
| 847 | iscurrent = False | ||
| 848 | |||
| 849 | return iscurrent | ||
| 850 | |||
    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is a state-machine step function: each call advances the
        queue through as many states as it can, and the return value
        tells the caller whether to call again (True) or stop (False).
        """

        # Note: these are deliberately sequential 'if's, not elif's -- one
        # call may fall through several states (prepare -> init -> running).
        if self.state is runQueuePrepare:
            self.prepare_runqueue()

        if self.state is runQueueRunInit:
            bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
            self.execute_runqueue_initVars()

        if self.state is runQueueRunning:
            self.execute_runqueue_internal()

        if self.state is runQueueCleanUp:
            self.finish_runqueue()

        if self.state is runQueueFailed:
            if self.taskData.abort:
                raise bb.runqueue.TaskFailure(self.failed_fnids)
            # Not aborting: mark the failed files in taskData and reset so
            # alternate providers can be tried on the next iteration.
            for fnid in self.failed_fnids:
                self.taskData.fail_fnid(fnid)
            self.reset_runqueue()

        if self.state is runQueueComplete:
            # All done
            bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
            return False

        if self.state is runQueueChildProcess:
            # A forked task child must never loop back into the queue.
            print "Child process"
            return False

        # Loop
        return True
| 889 | |||
| 890 | def execute_runqueue_initVars(self): | ||
| 891 | |||
| 892 | self.stats = RunQueueStats(len(self.runq_fnid)) | ||
| 893 | |||
| 894 | self.runq_buildable = [] | ||
| 895 | self.runq_running = [] | ||
| 896 | self.runq_complete = [] | ||
| 897 | self.build_pids = {} | ||
| 898 | self.failed_fnids = [] | ||
| 899 | |||
| 900 | # Mark initial buildable tasks | ||
| 901 | for task in range(self.stats.total): | ||
| 902 | self.runq_running.append(0) | ||
| 903 | self.runq_complete.append(0) | ||
| 904 | if len(self.runq_depends[task]) == 0: | ||
| 905 | self.runq_buildable.append(1) | ||
| 906 | else: | ||
| 907 | self.runq_buildable.append(0) | ||
| 908 | |||
| 909 | self.state = runQueueRunning | ||
| 910 | |||
| 911 | event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgData)) | ||
| 912 | |||
| 913 | def task_complete(self, task): | ||
| 914 | """ | ||
| 915 | Mark a task as completed | ||
| 916 | Look at the reverse dependencies and mark any task with | ||
| 917 | completed dependencies as buildable | ||
| 918 | """ | ||
| 919 | self.runq_complete[task] = 1 | ||
| 920 | for revdep in self.runq_revdeps[task]: | ||
| 921 | if self.runq_running[revdep] == 1: | ||
| 922 | continue | ||
| 923 | if self.runq_buildable[revdep] == 1: | ||
| 924 | continue | ||
| 925 | alldeps = 1 | ||
| 926 | for dep in self.runq_depends[revdep]: | ||
| 927 | if self.runq_complete[dep] != 1: | ||
| 928 | alldeps = 0 | ||
| 929 | if alldeps == 1: | ||
| 930 | self.runq_buildable[revdep] = 1 | ||
| 931 | fn = self.taskData.fn_index[self.runq_fnid[revdep]] | ||
| 932 | taskname = self.runq_task[revdep] | ||
| 933 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) | ||
| 934 | |||
| 935 | def task_fail(self, task, exitcode): | ||
| 936 | """ | ||
| 937 | Called when a task has failed | ||
| 938 | Updates the state engine with the failure | ||
| 939 | """ | ||
| 940 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed with %s" % (task, self.get_user_idstring(task), exitcode)) | ||
| 941 | self.stats.taskFailed() | ||
| 942 | fnid = self.runq_fnid[task] | ||
| 943 | self.failed_fnids.append(fnid) | ||
| 944 | bb.event.fire(runQueueTaskFailed(task, self.stats, self, self.cfgData)) | ||
| 945 | if self.taskData.abort: | ||
| 946 | self.state = runQueueCleanup | ||
| 947 | |||
| 948 | def execute_runqueue_internal(self): | ||
| 949 | """ | ||
| 950 | Run the tasks in a queue prepared by prepare_runqueue | ||
| 951 | """ | ||
| 952 | |||
| 953 | if self.stats.total == 0: | ||
| 954 | # nothing to do | ||
| 955 | self.state = runQueueCleanup | ||
| 956 | |||
| 957 | while True: | ||
| 958 | task = None | ||
| 959 | if self.stats.active < self.number_tasks: | ||
| 960 | task = self.sched.next() | ||
| 961 | if task is not None: | ||
| 962 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
| 963 | |||
| 964 | taskname = self.runq_task[task] | ||
| 965 | if self.check_stamp_task(task): | ||
| 966 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) | ||
| 967 | self.runq_running[task] = 1 | ||
| 968 | self.runq_buildable[task] = 1 | ||
| 969 | self.task_complete(task) | ||
| 970 | self.stats.taskCompleted() | ||
| 971 | self.stats.taskSkipped() | ||
| 972 | continue | ||
| 973 | |||
| 974 | bb.event.fire(runQueueTaskStarted(task, self.stats, self, self.cfgData)) | ||
| 975 | bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.stats.active + 1, self.stats.total, task, self.get_user_idstring(task))) | ||
| 976 | sys.stdout.flush() | ||
| 977 | sys.stderr.flush() | ||
| 978 | try: | ||
| 979 | pid = os.fork() | ||
| 980 | except OSError, e: | ||
| 981 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) | ||
| 982 | if pid == 0: | ||
| 983 | self.state = runQueueChildProcess | ||
| 984 | # Make the child the process group leader | ||
| 985 | os.setpgid(0, 0) | ||
| 986 | newsi = os.open('/dev/null', os.O_RDWR) | ||
| 987 | os.dup2(newsi, sys.stdin.fileno()) | ||
| 988 | self.cooker.configuration.cmd = taskname[3:] | ||
| 989 | bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data) | ||
| 990 | try: | ||
| 991 | self.cooker.tryBuild(fn) | ||
| 992 | except bb.build.EventException: | ||
| 993 | bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed") | ||
| 994 | sys.exit(1) | ||
| 995 | except: | ||
| 996 | bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed") | ||
| 997 | raise | ||
| 998 | sys.exit(0) | ||
| 999 | self.build_pids[pid] = task | ||
| 1000 | self.runq_running[task] = 1 | ||
| 1001 | self.stats.taskActive() | ||
| 1002 | if self.stats.active < self.number_tasks: | ||
| 1003 | continue | ||
| 1004 | if self.stats.active > 0: | ||
| 1005 | result = os.waitpid(-1, os.WNOHANG) | ||
| 1006 | if result[0] is 0 and result[1] is 0: | ||
| 1007 | return | ||
| 1008 | task = self.build_pids[result[0]] | ||
| 1009 | del self.build_pids[result[0]] | ||
| 1010 | if result[1] != 0: | ||
| 1011 | self.task_fail(task, result[1]) | ||
| 1012 | return | ||
| 1013 | self.task_complete(task) | ||
| 1014 | self.stats.taskCompleted() | ||
| 1015 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self, self.cfgData)) | ||
| 1016 | continue | ||
| 1017 | |||
| 1018 | if len(self.failed_fnids) != 0: | ||
| 1019 | self.state = runQueueFailed | ||
| 1020 | return | ||
| 1021 | |||
| 1022 | # Sanity Checks | ||
| 1023 | for task in range(self.stats.total): | ||
| 1024 | if self.runq_buildable[task] == 0: | ||
| 1025 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task) | ||
| 1026 | if self.runq_running[task] == 0: | ||
| 1027 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task) | ||
| 1028 | if self.runq_complete[task] == 0: | ||
| 1029 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task) | ||
| 1030 | self.state = runQueueComplete | ||
| 1031 | return | ||
| 1032 | |||
| 1033 | def finish_runqueue_now(self): | ||
| 1034 | bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.stats.active) | ||
| 1035 | for k, v in self.build_pids.iteritems(): | ||
| 1036 | try: | ||
| 1037 | os.kill(-k, signal.SIGINT) | ||
| 1038 | except: | ||
| 1039 | pass | ||
| 1040 | |||
| 1041 | def finish_runqueue(self, now = False): | ||
| 1042 | self.state = runQueueCleanUp | ||
| 1043 | if now: | ||
| 1044 | self.finish_runqueue_now() | ||
| 1045 | try: | ||
| 1046 | while self.stats.active > 0: | ||
| 1047 | bb.event.fire(runQueueExitWait(self.stats.active, self.cfgData)) | ||
| 1048 | bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.stats.active) | ||
| 1049 | tasknum = 1 | ||
| 1050 | for k, v in self.build_pids.iteritems(): | ||
| 1051 | bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k)) | ||
| 1052 | tasknum = tasknum + 1 | ||
| 1053 | result = os.waitpid(-1, os.WNOHANG) | ||
| 1054 | if result[0] is 0 and result[1] is 0: | ||
| 1055 | return | ||
| 1056 | task = self.build_pids[result[0]] | ||
| 1057 | del self.build_pids[result[0]] | ||
| 1058 | if result[1] != 0: | ||
| 1059 | self.task_fail(task, result[1]) | ||
| 1060 | else: | ||
| 1061 | self.stats.taskCompleted() | ||
| 1062 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self, self.cfgData)) | ||
| 1063 | except: | ||
| 1064 | self.finish_runqueue_now() | ||
| 1065 | raise | ||
| 1066 | |||
| 1067 | if len(self.failed_fnids) != 0: | ||
| 1068 | self.state = runQueueFailed | ||
| 1069 | return | ||
| 1070 | |||
| 1071 | self.state = runQueueComplete | ||
| 1072 | return | ||
| 1073 | |||
| 1074 | def dump_data(self, taskQueue): | ||
| 1075 | """ | ||
| 1076 | Dump some debug information on the internal data structures | ||
| 1077 | """ | ||
| 1078 | bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:") | ||
| 1079 | for task in range(len(self.runq_task)): | ||
| 1080 | bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, | ||
| 1081 | taskQueue.fn_index[self.runq_fnid[task]], | ||
| 1082 | self.runq_task[task], | ||
| 1083 | self.runq_weight[task], | ||
| 1084 | self.runq_depends[task], | ||
| 1085 | self.runq_revdeps[task])) | ||
| 1086 | |||
| 1087 | bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:") | ||
| 1088 | for task1 in range(len(self.runq_task)): | ||
| 1089 | if task1 in self.prio_map: | ||
| 1090 | task = self.prio_map[task1] | ||
| 1091 | bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, | ||
| 1092 | taskQueue.fn_index[self.runq_fnid[task]], | ||
| 1093 | self.runq_task[task], | ||
| 1094 | self.runq_weight[task], | ||
| 1095 | self.runq_depends[task], | ||
| 1096 | self.runq_revdeps[task])) | ||
| 1097 | |||
| 1098 | |||
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Expose the failing ids through the standard Exception 'args'
        # attribute so callers can iterate over them directly.
        self.args = x
| 1105 | |||
| 1106 | |||
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain, d):
        bb.event.Event.__init__(self, d)
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
| 1116 | |||
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq, d):
        bb.event.Event.__init__(self, d)
        # Record which task this event is about and a printable id for it.
        self.taskid = task
        self.taskstring = rq.get_user_idstring(task)
        self.stats = stats
| 1126 | |||
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifing a task was started
    """
    def __init__(self, task, stats, rq, d):
        runQueueEvent.__init__(self, task, stats, rq, d)
        self.message = "Running task %s (%d of %d) (%s)" % (
            task,
            self.stats.completed + self.stats.active + 1,
            self.stats.total,
            self.taskstring)
| 1134 | |||
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifing a task failed
    """
    def __init__(self, task, stats, rq, d):
        runQueueEvent.__init__(self, task, stats, rq, d)
        self.message = "Task %s failed (%s)" % (task, self.taskstring)
| 1142 | |||
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifing a task completed
    """
    def __init__(self, task, stats, rq, d):
        runQueueEvent.__init__(self, task, stats, rq, d)
        self.message = "Task %s completed (%s)" % (task, self.taskstring)
| 1150 | |||
def check_stamp_fn(fn, taskname, d):
    """
    Check a stamp for a (file, taskname) pair using the RunQueue stashed
    in the datastore. Returns the check result, or None if the task is
    not known to the runqueue.
    """
    rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
    taskid = rq.get_task_id(rq.taskData.getfn_id(fn), taskname)
    if taskid is None:
        return None
    return rq.check_stamp_task(taskid)
diff --git a/bitbake-dev/lib/bb/shell.py b/bitbake-dev/lib/bb/shell.py new file mode 100644 index 0000000000..34828fe425 --- /dev/null +++ b/bitbake-dev/lib/bb/shell.py | |||
| @@ -0,0 +1,827 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | ########################################################################## | ||
| 4 | # | ||
| 5 | # Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de> | ||
| 6 | # Copyright (C) 2005-2006 Vanille Media | ||
| 7 | # | ||
| 8 | # This program is free software; you can redistribute it and/or modify | ||
| 9 | # it under the terms of the GNU General Public License version 2 as | ||
| 10 | # published by the Free Software Foundation. | ||
| 11 | # | ||
| 12 | # This program is distributed in the hope that it will be useful, | ||
| 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | # GNU General Public License for more details. | ||
| 16 | # | ||
| 17 | # You should have received a copy of the GNU General Public License along | ||
| 18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 20 | # | ||
| 21 | ########################################################################## | ||
| 22 | # | ||
| 23 | # Thanks to: | ||
| 24 | # * Holger Freyther <zecke@handhelds.org> | ||
| 25 | # * Justin Patrin <papercrane@reversefold.com> | ||
| 26 | # | ||
| 27 | ########################################################################## | ||
| 28 | |||
| 29 | """ | ||
| 30 | BitBake Shell | ||
| 31 | |||
| 32 | IDEAS: | ||
| 33 | * list defined tasks per package | ||
| 34 | * list classes | ||
| 35 | * toggle force | ||
| 36 | * command to reparse just one (or more) bbfile(s) | ||
| 37 | * automatic check if reparsing is necessary (inotify?) | ||
| 38 | * frontend for bb file manipulation | ||
| 39 | * more shell-like features: | ||
| 40 | - output control, i.e. pipe output into grep, sort, etc. | ||
| 41 | - job control, i.e. bring running commands into background and foreground | ||
| 42 | * start parsing in background right after startup | ||
| 43 | * ncurses interface | ||
| 44 | |||
| 45 | PROBLEMS: | ||
| 46 | * force doesn't always work | ||
| 47 | * readline completion for commands with more than one parameters | ||
| 48 | |||
| 49 | """ | ||
| 50 | |||
| 51 | ########################################################################## | ||
| 52 | # Import and setup global variables | ||
| 53 | ########################################################################## | ||
| 54 | |||
| 55 | try: | ||
| 56 | set | ||
| 57 | except NameError: | ||
| 58 | from sets import Set as set | ||
| 59 | import sys, os, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch | ||
| 60 | from bb import data, parse, build, fatal, cache, taskdata, runqueue, providers as Providers | ||
| 61 | |||
__version__ = "0.5.3.1"
# Startup banner; %s is filled in with the shell version above.
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
Type 'help' for more information, press CTRL-D to exit.""" % __version__

# Global shell state shared between the command class and the main loop.
cmds = {}                  # command name -> handler data (aliases share entries)
leave_mainloop = False     # presumably set by an exit command to stop the main loop -- not visible in this chunk
last_exception = None      # most recent failure recorded by command handlers (e.g. build)
cooker = None              # the bb cooker instance driving parsing and builds
parsed = False             # whether the bbfiles have been parsed yet (see _checkParsed)
debug = os.environ.get( "BBSHELL_DEBUG", "" )  # non-empty enables debug output
| 72 | |||
| 73 | ########################################################################## | ||
| 74 | # Class BitBakeShellCommands | ||
| 75 | ########################################################################## | ||
| 76 | |||
| 77 | class BitBakeShellCommands: | ||
| 78 | """This class contains the valid commands for the shell""" | ||
| 79 | |||
| 80 | def __init__( self, shell ): | ||
| 81 | """Register all the commands""" | ||
| 82 | self._shell = shell | ||
| 83 | for attr in BitBakeShellCommands.__dict__: | ||
| 84 | if not attr.startswith( "_" ): | ||
| 85 | if attr.endswith( "_" ): | ||
| 86 | command = attr[:-1].lower() | ||
| 87 | else: | ||
| 88 | command = attr[:].lower() | ||
| 89 | method = getattr( BitBakeShellCommands, attr ) | ||
| 90 | debugOut( "registering command '%s'" % command ) | ||
| 91 | # scan number of arguments | ||
| 92 | usage = getattr( method, "usage", "" ) | ||
| 93 | if usage != "<...>": | ||
| 94 | numArgs = len( usage.split() ) | ||
| 95 | else: | ||
| 96 | numArgs = -1 | ||
| 97 | shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ ) | ||
| 98 | |||
| 99 | def _checkParsed( self ): | ||
| 100 | if not parsed: | ||
| 101 | print "SHELL: This command needs to parse bbfiles..." | ||
| 102 | self.parse( None ) | ||
| 103 | |||
| 104 | def _findProvider( self, item ): | ||
| 105 | self._checkParsed() | ||
| 106 | # Need to use taskData for this information | ||
| 107 | preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 ) | ||
| 108 | if not preferred: preferred = item | ||
| 109 | try: | ||
| 110 | lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status) | ||
| 111 | except KeyError: | ||
| 112 | if item in cooker.status.providers: | ||
| 113 | pf = cooker.status.providers[item][0] | ||
| 114 | else: | ||
| 115 | pf = None | ||
| 116 | return pf | ||
| 117 | |||
| 118 | def alias( self, params ): | ||
| 119 | """Register a new name for a command""" | ||
| 120 | new, old = params | ||
| 121 | if not old in cmds: | ||
| 122 | print "ERROR: Command '%s' not known" % old | ||
| 123 | else: | ||
| 124 | cmds[new] = cmds[old] | ||
| 125 | print "OK" | ||
| 126 | alias.usage = "<alias> <command>" | ||
| 127 | |||
| 128 | def buffer( self, params ): | ||
| 129 | """Dump specified output buffer""" | ||
| 130 | index = params[0] | ||
| 131 | print self._shell.myout.buffer( int( index ) ) | ||
| 132 | buffer.usage = "<index>" | ||
| 133 | |||
| 134 | def buffers( self, params ): | ||
| 135 | """Show the available output buffers""" | ||
| 136 | commands = self._shell.myout.bufferedCommands() | ||
| 137 | if not commands: | ||
| 138 | print "SHELL: No buffered commands available yet. Start doing something." | ||
| 139 | else: | ||
| 140 | print "="*35, "Available Output Buffers", "="*27 | ||
| 141 | for index, cmd in enumerate( commands ): | ||
| 142 | print "| %s %s" % ( str( index ).ljust( 3 ), cmd ) | ||
| 143 | print "="*88 | ||
| 144 | |||
    def build( self, params, cmd = "build" ):
        """Build a providee"""
        global last_exception
        globexpr = params[0]
        self._checkParsed()
        # Expand the glob against all known package names; fall back to the
        # literal expression so targets that didn't match still get attempted
        names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
        if len( names ) == 0: names = [ globexpr ]
        print "SHELL: Building %s" % ' '.join( names )

        # Temporarily override the configured task command (restored below)
        oldcmd = cooker.configuration.cmd
        cooker.configuration.cmd = cmd

        td = taskdata.TaskData(cooker.configuration.abort)
        localdata = data.createCopy(cooker.configuration.data)
        data.update_data(localdata)
        data.expandKeys(localdata)

        try:
            tasks = []
            for name in names:
                td.add_provider(localdata, cooker.status, name)
                providers = td.get_provider(name)

                if len(providers) == 0:
                    raise Providers.NoProvider

                tasks.append([name, "do_%s" % cooker.configuration.cmd])

            # Resolve everything the requested targets depend on
            td.add_unresolved(localdata, cooker.status)

            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
            rq.prepare_runqueue()
            rq.execute_runqueue()

        except Providers.NoProvider:
            print "ERROR: No Provider"
            last_exception = Providers.NoProvider

        except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                print "ERROR: '%s' failed" % td.fn_index[fnid]
            last_exception = runqueue.TaskFailure

        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % names
            last_exception = e

        # Always restore the original command, even after a failure
        cooker.configuration.cmd = oldcmd

    build.usage = "<providee>"
| 195 | |||
    def clean( self, params ):
        """Clean a providee (runs the 'do_clean' task via build())"""
        self.build( params, "clean" )
    clean.usage = "<providee>"
| 200 | |||
    def compile( self, params ):
        """Execute 'compile' on a providee (runs the 'do_compile' task via build())"""
        self.build( params, "compile" )
    compile.usage = "<providee>"
| 205 | |||
    def configure( self, params ):
        """Execute 'configure' on a providee (runs the 'do_configure' task via build())"""
        self.build( params, "configure" )
    configure.usage = "<providee>"
| 210 | |||
| 211 | def edit( self, params ): | ||
| 212 | """Call $EDITOR on a providee""" | ||
| 213 | name = params[0] | ||
| 214 | bbfile = self._findProvider( name ) | ||
| 215 | if bbfile is not None: | ||
| 216 | os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) ) | ||
| 217 | else: | ||
| 218 | print "ERROR: Nothing provides '%s'" % name | ||
| 219 | edit.usage = "<providee>" | ||
| 220 | |||
    def environment( self, params ):
        """Dump out the outer BitBake environment"""
        # Delegates entirely to the cooker; no arguments means "global env"
        cooker.showEnvironment()
| 224 | |||
| 225 | def exit_( self, params ): | ||
| 226 | """Leave the BitBake Shell""" | ||
| 227 | debugOut( "setting leave_mainloop to true" ) | ||
| 228 | global leave_mainloop | ||
| 229 | leave_mainloop = True | ||
| 230 | |||
    def fetch( self, params ):
        """Fetch a providee (runs the 'do_fetch' task via build())"""
        self.build( params, "fetch" )
    fetch.usage = "<providee>"
| 235 | |||
    def fileBuild( self, params, cmd = "build" ):
        """Parse and build a .bb file"""
        global last_exception
        name = params[0]
        # Resolve a bare filename against the set of known bbfiles
        bf = completeFilePath( name )
        print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )

        # Temporarily override the configured task command (restored below)
        oldcmd = cooker.configuration.cmd
        cooker.configuration.cmd = cmd

        try:
            cooker.buildFile(bf)
        except parse.ParseError:
            print "ERROR: Unable to open or parse '%s'" % bf
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % name
            last_exception = e

        cooker.configuration.cmd = oldcmd
    fileBuild.usage = "<bbfile>"
| 256 | |||
    def fileClean( self, params ):
        """Clean a .bb file (runs the 'clean' task via fileBuild())"""
        self.fileBuild( params, "clean" )
    fileClean.usage = "<bbfile>"
| 261 | |||
| 262 | def fileEdit( self, params ): | ||
| 263 | """Call $EDITOR on a .bb file""" | ||
| 264 | name = params[0] | ||
| 265 | os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) ) | ||
| 266 | fileEdit.usage = "<bbfile>" | ||
| 267 | |||
    def fileRebuild( self, params ):
        """Rebuild (clean & build) a .bb file (runs the 'rebuild' task via fileBuild())"""
        self.fileBuild( params, "rebuild" )
    fileRebuild.usage = "<bbfile>"
| 272 | |||
    def fileReparse( self, params ):
        """(re)Parse a bb file"""
        bbfile = params[0]
        print "SHELL: Parsing '%s'" % bbfile
        parse.update_mtime( bbfile )
        cooker.bb_cache.cacheValidUpdate(bbfile)
        fromCache = cooker.bb_cache.loadData(bbfile, cooker.configuration.data)
        cooker.bb_cache.sync()
        # NOTE(review): the cache check is deliberately disabled below - the
        # file is always reported as reparsed regardless of 'fromCache'
        if False: #fromCache:
            print "SHELL: File has not been updated, not reparsing"
        else:
            print "SHELL: Parsed"
    fileReparse.usage = "<bbfile>"
| 286 | |||
| 287 | def abort( self, params ): | ||
| 288 | """Toggle abort task execution flag (see bitbake -k)""" | ||
| 289 | cooker.configuration.abort = not cooker.configuration.abort | ||
| 290 | print "SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort ) | ||
| 291 | |||
| 292 | def force( self, params ): | ||
| 293 | """Toggle force task execution flag (see bitbake -f)""" | ||
| 294 | cooker.configuration.force = not cooker.configuration.force | ||
| 295 | print "SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force ) | ||
| 296 | |||
| 297 | def help( self, params ): | ||
| 298 | """Show a comprehensive list of commands and their purpose""" | ||
| 299 | print "="*30, "Available Commands", "="*30 | ||
| 300 | allcmds = cmds.keys() | ||
| 301 | allcmds.sort() | ||
| 302 | for cmd in allcmds: | ||
| 303 | function,numparams,usage,helptext = cmds[cmd] | ||
| 304 | print "| %s | %s" % (usage.ljust(30), helptext) | ||
| 305 | print "="*78 | ||
| 306 | |||
| 307 | def lastError( self, params ): | ||
| 308 | """Show the reason or log that was produced by the last BitBake event exception""" | ||
| 309 | if last_exception is None: | ||
| 310 | print "SHELL: No Errors yet (Phew)..." | ||
| 311 | else: | ||
| 312 | reason, event = last_exception.args | ||
| 313 | print "SHELL: Reason for the last error: '%s'" % reason | ||
| 314 | if ':' in reason: | ||
| 315 | msg, filename = reason.split( ':' ) | ||
| 316 | filename = filename.strip() | ||
| 317 | print "SHELL: Dumping log file for last error:" | ||
| 318 | try: | ||
| 319 | print open( filename ).read() | ||
| 320 | except IOError: | ||
| 321 | print "ERROR: Couldn't open '%s'" % filename | ||
| 322 | |||
| 323 | def match( self, params ): | ||
| 324 | """Dump all files or providers matching a glob expression""" | ||
| 325 | what, globexpr = params | ||
| 326 | if what == "files": | ||
| 327 | self._checkParsed() | ||
| 328 | for key in globfilter( cooker.status.pkg_fn.keys(), globexpr ): print key | ||
| 329 | elif what == "providers": | ||
| 330 | self._checkParsed() | ||
| 331 | for key in globfilter( cooker.status.pkg_pn.keys(), globexpr ): print key | ||
| 332 | else: | ||
| 333 | print "Usage: match %s" % self.print_.usage | ||
| 334 | match.usage = "<files|providers> <glob>" | ||
| 335 | |||
| 336 | def new( self, params ): | ||
| 337 | """Create a new .bb file and open the editor""" | ||
| 338 | dirname, filename = params | ||
| 339 | packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] ) | ||
| 340 | fulldirname = "%s/%s" % ( packages, dirname ) | ||
| 341 | |||
| 342 | if not os.path.exists( fulldirname ): | ||
| 343 | print "SHELL: Creating '%s'" % fulldirname | ||
| 344 | os.mkdir( fulldirname ) | ||
| 345 | if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ): | ||
| 346 | if os.path.exists( "%s/%s" % ( fulldirname, filename ) ): | ||
| 347 | print "SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename ) | ||
| 348 | return False | ||
| 349 | print "SHELL: Creating '%s/%s'" % ( fulldirname, filename ) | ||
| 350 | newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" ) | ||
| 351 | print >>newpackage,"""DESCRIPTION = "" | ||
| 352 | SECTION = "" | ||
| 353 | AUTHOR = "" | ||
| 354 | HOMEPAGE = "" | ||
| 355 | MAINTAINER = "" | ||
| 356 | LICENSE = "GPL" | ||
| 357 | PR = "r0" | ||
| 358 | |||
| 359 | SRC_URI = "" | ||
| 360 | |||
| 361 | #inherit base | ||
| 362 | |||
| 363 | #do_configure() { | ||
| 364 | # | ||
| 365 | #} | ||
| 366 | |||
| 367 | #do_compile() { | ||
| 368 | # | ||
| 369 | #} | ||
| 370 | |||
| 371 | #do_stage() { | ||
| 372 | # | ||
| 373 | #} | ||
| 374 | |||
| 375 | #do_install() { | ||
| 376 | # | ||
| 377 | #} | ||
| 378 | """ | ||
| 379 | newpackage.close() | ||
| 380 | os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) ) | ||
| 381 | new.usage = "<directory> <filename>" | ||
| 382 | |||
    def package( self, params ):
        """Execute 'package' on a providee (runs the 'do_package' task via build())"""
        self.build( params, "package" )
    package.usage = "<providee>"
| 387 | |||
| 388 | def pasteBin( self, params ): | ||
| 389 | """Send a command + output buffer to the pastebin at http://rafb.net/paste""" | ||
| 390 | index = params[0] | ||
| 391 | contents = self._shell.myout.buffer( int( index ) ) | ||
| 392 | sendToPastebin( "output of " + params[0], contents ) | ||
| 393 | pasteBin.usage = "<index>" | ||
| 394 | |||
| 395 | def pasteLog( self, params ): | ||
| 396 | """Send the last event exception error log (if there is one) to http://rafb.net/paste""" | ||
| 397 | if last_exception is None: | ||
| 398 | print "SHELL: No Errors yet (Phew)..." | ||
| 399 | else: | ||
| 400 | reason, event = last_exception.args | ||
| 401 | print "SHELL: Reason for the last error: '%s'" % reason | ||
| 402 | if ':' in reason: | ||
| 403 | msg, filename = reason.split( ':' ) | ||
| 404 | filename = filename.strip() | ||
| 405 | print "SHELL: Pasting log file to pastebin..." | ||
| 406 | |||
| 407 | file = open( filename ).read() | ||
| 408 | sendToPastebin( "contents of " + filename, file ) | ||
| 409 | |||
    def patch( self, params ):
        """Execute 'patch' command on a providee (runs the 'do_patch' task via build())"""
        self.build( params, "patch" )
    patch.usage = "<providee>"
| 414 | |||
    def parse( self, params ):
        """(Re-)parse .bb files and calculate the dependency graph"""
        # Start from a fresh cache; ASSUME_PROVIDED entries never need providers
        cooker.status = cache.CacheData()
        ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
        cooker.status.ignored_dependencies = set( ignore.split() )
        cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )

        (filelist, masked) = cooker.collect_bbfiles()
        cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
        cooker.buildDepgraph()
        # Remember that parsing happened so _checkParsed() won't re-trigger it
        global parsed
        parsed = True
| 427 | |||
| 428 | |||
| 429 | def reparse( self, params ): | ||
| 430 | """(re)Parse a providee's bb file""" | ||
| 431 | bbfile = self._findProvider( params[0] ) | ||
| 432 | if bbfile is not None: | ||
| 433 | print "SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] ) | ||
| 434 | self.fileReparse( [ bbfile ] ) | ||
| 435 | else: | ||
| 436 | print "ERROR: Nothing provides '%s'" % params[0] | ||
| 437 | reparse.usage = "<providee>" | ||
| 438 | |||
| 439 | def getvar( self, params ): | ||
| 440 | """Dump the contents of an outer BitBake environment variable""" | ||
| 441 | var = params[0] | ||
| 442 | value = data.getVar( var, cooker.configuration.data, 1 ) | ||
| 443 | print value | ||
| 444 | getvar.usage = "<variable>" | ||
| 445 | |||
| 446 | def peek( self, params ): | ||
| 447 | """Dump contents of variable defined in providee's metadata""" | ||
| 448 | name, var = params | ||
| 449 | bbfile = self._findProvider( name ) | ||
| 450 | if bbfile is not None: | ||
| 451 | the_data = cooker.bb_cache.loadDataFull(bbfile, cooker.configuration.data) | ||
| 452 | value = the_data.getVar( var, 1 ) | ||
| 453 | print value | ||
| 454 | else: | ||
| 455 | print "ERROR: Nothing provides '%s'" % name | ||
| 456 | peek.usage = "<providee> <variable>" | ||
| 457 | |||
    def poke( self, params ):
        """Set contents of variable defined in providee's metadata"""
        name, var, value = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            # NOTE(review): the implementation below was disabled when the
            # cooker's pkgdata store went away; only the error message remains
            print "ERROR: Sorry, this functionality is currently broken"
            #d = cooker.pkgdata[bbfile]
            #data.setVar( var, value, d )

            # mark the change semi persistant
            #cooker.pkgdata.setDirty(bbfile, d)
            #print "OK"
        else:
            print "ERROR: Nothing provides '%s'" % name
    poke.usage = "<providee> <variable> <value>"
| 473 | |||
| 474 | def print_( self, params ): | ||
| 475 | """Dump all files or providers""" | ||
| 476 | what = params[0] | ||
| 477 | if what == "files": | ||
| 478 | self._checkParsed() | ||
| 479 | for key in cooker.status.pkg_fn.keys(): print key | ||
| 480 | elif what == "providers": | ||
| 481 | self._checkParsed() | ||
| 482 | for key in cooker.status.providers.keys(): print key | ||
| 483 | else: | ||
| 484 | print "Usage: print %s" % self.print_.usage | ||
| 485 | print_.usage = "<files|providers>" | ||
| 486 | |||
    def python( self, params ):
        """Enter the expert mode - an interactive BitBake Python Interpreter"""
        # Switch the prompts so the nested interpreter is distinguishable
        sys.ps1 = "EXPERT BB>>> "
        sys.ps2 = "EXPERT BB... "
        import code
        # A copy of globals() gives the interpreter access to cooker & friends
        # without letting it rebind this module's own names
        interpreter = code.InteractiveConsole( dict( globals() ) )
        interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
| 494 | |||
    def showdata( self, params ):
        """Execute 'showdata' on a providee"""
        # Delegates to the cooker; params selects the packages to show
        cooker.showEnvironment(None, params)
    showdata.usage = "<providee>"
| 499 | |||
| 500 | def setVar( self, params ): | ||
| 501 | """Set an outer BitBake environment variable""" | ||
| 502 | var, value = params | ||
| 503 | data.setVar( var, value, cooker.configuration.data ) | ||
| 504 | print "OK" | ||
| 505 | setVar.usage = "<variable> <value>" | ||
| 506 | |||
| 507 | def rebuild( self, params ): | ||
| 508 | """Clean and rebuild a .bb file or a providee""" | ||
| 509 | self.build( params, "clean" ) | ||
| 510 | self.build( params, "build" ) | ||
| 511 | rebuild.usage = "<providee>" | ||
| 512 | |||
| 513 | def shell( self, params ): | ||
| 514 | """Execute a shell command and dump the output""" | ||
| 515 | if params != "": | ||
| 516 | print commands.getoutput( " ".join( params ) ) | ||
| 517 | shell.usage = "<...>" | ||
| 518 | |||
    def stage( self, params ):
        """Execute 'stage' on a providee (runs the 'do_stage' task via build())"""
        self.build( params, "stage" )
    stage.usage = "<providee>"
| 523 | |||
    def status( self, params ):
        """<just for testing>"""
        print "-" * 78
        # NOTE(review): building_list/build_path/consider_msgs_cache/stats look
        # like attributes of an older cooker implementation - confirm they
        # still exist before relying on this command
        print "building list = '%s'" % cooker.building_list
        print "build path = '%s'" % cooker.build_path
        print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
        print "build stats = '%s'" % cooker.stats
        if last_exception is not None: print "last_exception = '%s'" % repr( last_exception.args )
        print "memory output contents = '%s'" % self._shell.myout._buffer
| 533 | |||
    def test( self, params ):
        """<just for testing>"""
        # Smoke-test command: echoes whatever arguments it received
        print "testCommand called with '%s'" % params
| 537 | |||
    def unpack( self, params ):
        """Execute 'unpack' on a providee (runs the 'do_unpack' task via build())"""
        self.build( params, "unpack" )
    unpack.usage = "<providee>"
| 542 | |||
    def which( self, params ):
        """Computes the providers for a given providee"""
        # Need to use taskData for this information
        item = params[0]

        self._checkParsed()

        preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
        if not preferred: preferred = item

        try:
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
        except KeyError:
            # No known best provider - nothing will be starred below
            lv, lf, pv, pf = (None,)*4

        try:
            providers = cooker.status.providers[item]
        except KeyError:
            print "SHELL: ERROR: Nothing provides", preferred
        else:
            # The preferred/best provider (if any) is marked with (***)
            for provider in providers:
                if provider == pf: provider = " (***) %s" % provider
                else: provider = "       %s" % provider
                print provider
    which.usage = "<providee>"
| 568 | |||
| 569 | ########################################################################## | ||
| 570 | # Common helper functions | ||
| 571 | ########################################################################## | ||
| 572 | |||
def completeFilePath( bbfile ):
    """Get the complete bbfile path"""
    status = cooker.status
    # Without parsed package data there is nothing to resolve against
    if not status or not status.pkg_fn:
        return bbfile
    for key in status.pkg_fn.keys():
        if key.endswith( bbfile ):
            return key
    return bbfile
| 581 | |||
def sendToPastebin( desc, content ):
    """Send content to http://oe.pastebin.com"""
    # NOTE(review): despite the docstring the code posts to rafb.net, a
    # pastebin service that may no longer exist - confirm before relying on it
    mydata = {}
    mydata["lang"] = "Plain Text"
    mydata["desc"] = desc
    mydata["cvt_tabs"] = "No"
    mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
    mydata["text"] = content
    params = urllib.urlencode( mydata )
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}

    host = "rafb.net"
    conn = httplib.HTTPConnection( "%s:80" % host )
    conn.request("POST", "/paste/paste.php", params, headers )

    response = conn.getresponse()
    conn.close()

    # A 302 redirect carries the URL of the new paste in the location header
    if response.status == 302:
        location = response.getheader( "location" ) or "unknown"
        print "SHELL: Pasted to http://%s%s" % ( host, location )
    else:
        print "ERROR: %s %s" % ( response.status, response.reason )
| 605 | |||
def completer( text, state ):
    """Return a possible readline completion"""
    debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )

    # readline calls this repeatedly with increasing 'state'; the match list
    # is computed once at state 0 and handed out one entry per call afterwards
    if state == 0:
        line = readline.get_line_buffer()
        if " " in line:
            line = line.split()
            # we are in second (or more) argument
            if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
                # First token of the usage string selects the completion source
                u = getattr( cmds[line[0]][0], "usage" ).split()[0]
                if u == "<variable>":
                    allmatches = cooker.configuration.data.keys()
                elif u == "<bbfile>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn.keys() ]
                elif u == "<providee>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = cooker.status.providers.iterkeys()
                else: allmatches = [ "(No tab completion available for this command)" ]
            else: allmatches = [ "(No tab completion available for this command)" ]
        else:
            # we are in first argument
            allmatches = cmds.iterkeys()

        # Cache the prefix-filtered matches on the function object itself
        completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
        #print "completer.matches = '%s'" % completer.matches
    if len( completer.matches ) > state:
        return completer.matches[state]
    else:
        return None
| 637 | |||
def debugOut( text ):
    """Write a diagnostic line to stderr when shell debugging is enabled."""
    if not debug:
        return
    sys.stderr.write( "( %s )\n" % text )
| 641 | |||
def columnize( alist, width = 80 ):
    """
    A word-wrap function that preserves existing line breaks
    and most spaces in the text. Expects that existing line
    breaks are posix newlines (\n).
    """
    def _append( line, word ):
        # Only the current (last) output line and the first line of the
        # incoming word count towards the width limit
        current = len( line ) - ( line.rfind( '\n' ) + 1 )
        if current + len( word.split( '\n', 1 )[0] ) >= width:
            return line + '\n' + word
        return line + ' ' + word
    return reduce( _append, alist )
| 656 | |||
def globfilter( names, pattern ):
    """Return the members of 'names' that match the shell glob 'pattern'."""
    return [ name for name in names if fnmatch.fnmatch( name, pattern ) ]
| 659 | |||
| 660 | ########################################################################## | ||
| 661 | # Class MemoryOutput | ||
| 662 | ########################################################################## | ||
| 663 | |||
class MemoryOutput:
    """File-like output class buffering the output of the last 10 commands"""

    def __init__( self, delegate ):
        self.delegate = delegate   # real stream output is mirrored to (may be None)
        self._buffer = []          # up to 10 (command, output-lines) pairs
        self.text = []             # output captured for the current command
        self._command = None       # command currently being captured

    def startCommand( self, command ):
        """Begin capturing output for 'command'."""
        self._command = command
        self.text = []

    def endCommand( self ):
        """Store the captured output, keeping only the last 10 commands."""
        if self._command is None:
            return
        if len( self._buffer ) == 10:
            del self._buffer[0]
        self._buffer.append( ( self._command, self.text ) )

    def removeLast( self ):
        """Drop the most recent buffer entry and reset the capture state."""
        if self._buffer:
            del self._buffer[-1]
        self.text = []
        self._command = None

    def lastBuffer( self ):
        """Return the output lines of the most recent command, if any."""
        if self._buffer:
            return self._buffer[-1][1]

    def bufferedCommands( self ):
        """Return the list of buffered command lines."""
        return [ cmd for cmd, output in self._buffer ]

    def buffer( self, i ):
        """Return a formatted dump of buffer number 'i', or an error string."""
        if i >= len( self._buffer ):
            return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
        command, output = self._buffer[i]
        return "BB>> %s\n%s" % ( command, "".join( output ) )

    def write( self, text ):
        """Capture 'text' (except the shell prompt) and mirror it to the delegate."""
        if self._command is not None and text != "BB>> ":
            self.text.append( text )
        if self.delegate is not None:
            self.delegate.write( text )

    def flush( self ):
        return self.delegate.flush()

    def fileno( self ):
        return self.delegate.fileno()

    def isatty( self ):
        return self.delegate.isatty()
| 702 | |||
| 703 | ########################################################################## | ||
| 704 | # Class BitBakeShell | ||
| 705 | ########################################################################## | ||
| 706 | |||
| 707 | class BitBakeShell: | ||
| 708 | |||
| 709 | def __init__( self ): | ||
| 710 | """Register commands and set up readline""" | ||
| 711 | self.commandQ = Queue.Queue() | ||
| 712 | self.commands = BitBakeShellCommands( self ) | ||
| 713 | self.myout = MemoryOutput( sys.stdout ) | ||
| 714 | self.historyfilename = os.path.expanduser( "~/.bbsh_history" ) | ||
| 715 | self.startupfilename = os.path.expanduser( "~/.bbsh_startup" ) | ||
| 716 | |||
| 717 | readline.set_completer( completer ) | ||
| 718 | readline.set_completer_delims( " " ) | ||
| 719 | readline.parse_and_bind("tab: complete") | ||
| 720 | |||
| 721 | try: | ||
| 722 | readline.read_history_file( self.historyfilename ) | ||
| 723 | except IOError: | ||
| 724 | pass # It doesn't exist yet. | ||
| 725 | |||
| 726 | print __credits__ | ||
| 727 | |||
| 728 | def cleanup( self ): | ||
| 729 | """Write readline history and clean up resources""" | ||
| 730 | debugOut( "writing command history" ) | ||
| 731 | try: | ||
| 732 | readline.write_history_file( self.historyfilename ) | ||
| 733 | except: | ||
| 734 | print "SHELL: Unable to save command history" | ||
| 735 | |||
| 736 | def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ): | ||
| 737 | """Register a command""" | ||
| 738 | if usage == "": usage = command | ||
| 739 | if helptext == "": helptext = function.__doc__ or "<not yet documented>" | ||
| 740 | cmds[command] = ( function, numparams, usage, helptext ) | ||
| 741 | |||
| 742 | def processCommand( self, command, params ): | ||
| 743 | """Process a command. Check number of params and print a usage string, if appropriate""" | ||
| 744 | debugOut( "processing command '%s'..." % command ) | ||
| 745 | try: | ||
| 746 | function, numparams, usage, helptext = cmds[command] | ||
| 747 | except KeyError: | ||
| 748 | print "SHELL: ERROR: '%s' command is not a valid command." % command | ||
| 749 | self.myout.removeLast() | ||
| 750 | else: | ||
| 751 | if (numparams != -1) and (not len( params ) == numparams): | ||
| 752 | print "Usage: '%s'" % usage | ||
| 753 | return | ||
| 754 | |||
| 755 | result = function( self.commands, params ) | ||
| 756 | debugOut( "result was '%s'" % result ) | ||
| 757 | |||
| 758 | def processStartupFile( self ): | ||
| 759 | """Read and execute all commands found in $HOME/.bbsh_startup""" | ||
| 760 | if os.path.exists( self.startupfilename ): | ||
| 761 | startupfile = open( self.startupfilename, "r" ) | ||
| 762 | for cmdline in startupfile: | ||
| 763 | debugOut( "processing startup line '%s'" % cmdline ) | ||
| 764 | if not cmdline: | ||
| 765 | continue | ||
| 766 | if "|" in cmdline: | ||
| 767 | print "ERROR: '|' in startup file is not allowed. Ignoring line" | ||
| 768 | continue | ||
| 769 | self.commandQ.put( cmdline.strip() ) | ||
| 770 | |||
| 771 | def main( self ): | ||
| 772 | """The main command loop""" | ||
| 773 | while not leave_mainloop: | ||
| 774 | try: | ||
| 775 | if self.commandQ.empty(): | ||
| 776 | sys.stdout = self.myout.delegate | ||
| 777 | cmdline = raw_input( "BB>> " ) | ||
| 778 | sys.stdout = self.myout | ||
| 779 | else: | ||
| 780 | cmdline = self.commandQ.get() | ||
| 781 | if cmdline: | ||
| 782 | allCommands = cmdline.split( ';' ) | ||
| 783 | for command in allCommands: | ||
| 784 | pipecmd = None | ||
| 785 | # | ||
| 786 | # special case for expert mode | ||
| 787 | if command == 'python': | ||
| 788 | sys.stdout = self.myout.delegate | ||
| 789 | self.processCommand( command, "" ) | ||
| 790 | sys.stdout = self.myout | ||
| 791 | else: | ||
| 792 | self.myout.startCommand( command ) | ||
| 793 | if '|' in command: # disable output | ||
| 794 | command, pipecmd = command.split( '|' ) | ||
| 795 | delegate = self.myout.delegate | ||
| 796 | self.myout.delegate = None | ||
| 797 | tokens = shlex.split( command, True ) | ||
| 798 | self.processCommand( tokens[0], tokens[1:] or "" ) | ||
| 799 | self.myout.endCommand() | ||
| 800 | if pipecmd is not None: # restore output | ||
| 801 | self.myout.delegate = delegate | ||
| 802 | |||
| 803 | pipe = popen2.Popen4( pipecmd ) | ||
| 804 | pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) ) | ||
| 805 | pipe.tochild.close() | ||
| 806 | sys.stdout.write( pipe.fromchild.read() ) | ||
| 807 | # | ||
| 808 | except EOFError: | ||
| 809 | |||
| 810 | return | ||
| 811 | except KeyboardInterrupt: | ||
| 812 | |||
| 813 | |||
| 814 | ########################################################################## | ||
| 815 | # Start function - called from the BitBake command line utility | ||
| 816 | ########################################################################## | ||
| 817 | |||
def start( aCooker ):
    """Entry point called by the BitBake command line utility: install the
    cooker, run the interactive shell until exit, then save history."""
    global cooker
    cooker = aCooker
    bbshell = BitBakeShell()
    bbshell.processStartupFile()
    bbshell.main()
    bbshell.cleanup()
| 825 | |||
# This module is driven by bb.shell.start(); it is not a standalone program.
if __name__ == "__main__":
    print "SHELL: Sorry, this program should only be called by BitBake."
diff --git a/bitbake-dev/lib/bb/taskdata.py b/bitbake-dev/lib/bb/taskdata.py new file mode 100644 index 0000000000..566614ee63 --- /dev/null +++ b/bitbake-dev/lib/bb/taskdata.py | |||
| @@ -0,0 +1,594 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # ex:ts=4:sw=4:sts=4:et | ||
| 3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 4 | """ | ||
| 5 | BitBake 'TaskData' implementation | ||
| 6 | |||
| 7 | Task data collection and handling | ||
| 8 | |||
| 9 | """ | ||
| 10 | |||
| 11 | # Copyright (C) 2006 Richard Purdie | ||
| 12 | # | ||
| 13 | # This program is free software; you can redistribute it and/or modify | ||
| 14 | # it under the terms of the GNU General Public License version 2 as | ||
| 15 | # published by the Free Software Foundation. | ||
| 16 | # | ||
| 17 | # This program is distributed in the hope that it will be useful, | ||
| 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 20 | # GNU General Public License for more details. | ||
| 21 | # | ||
| 22 | # You should have received a copy of the GNU General Public License along | ||
| 23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 25 | |||
| 26 | from bb import data, event, mkdirhier, utils | ||
| 27 | import bb, os | ||
| 28 | |||
| 29 | class TaskData: | ||
| 30 | """ | ||
| 31 | BitBake Task Data implementation | ||
| 32 | """ | ||
    def __init__(self, abort = True):
        # Index tables mapping target names / filenames to small integer IDs
        self.build_names_index = []
        self.run_names_index = []
        self.fn_index = []

        # Mappings from target ID to candidate provider information
        self.build_targets = {}
        self.run_targets = {}

        # Build target IDs explicitly requested from outside
        self.external_targets = []

        # Per-task parallel arrays, all indexed by task ID
        self.tasks_fnid = []
        self.tasks_name = []
        self.tasks_tdepends = []
        self.tasks_idepends = []
        # Cache to speed up task ID lookups
        self.tasks_lookup = {}

        # Dependency IDs recorded per fnid
        self.depids = {}
        self.rdepids = {}

        self.consider_msgs_cache = []

        # Targets / files that could not be satisfied
        self.failed_deps = []
        self.failed_rdeps = []
        self.failed_fnids = []

        # Whether a failure aborts processing (bitbake -k clears this)
        self.abort = abort
| 60 | |||
| 61 | def getbuild_id(self, name): | ||
| 62 | """ | ||
| 63 | Return an ID number for the build target name. | ||
| 64 | If it doesn't exist, create one. | ||
| 65 | """ | ||
| 66 | if not name in self.build_names_index: | ||
| 67 | self.build_names_index.append(name) | ||
| 68 | return len(self.build_names_index) - 1 | ||
| 69 | |||
| 70 | return self.build_names_index.index(name) | ||
| 71 | |||
| 72 | def getrun_id(self, name): | ||
| 73 | """ | ||
| 74 | Return an ID number for the run target name. | ||
| 75 | If it doesn't exist, create one. | ||
| 76 | """ | ||
| 77 | if not name in self.run_names_index: | ||
| 78 | self.run_names_index.append(name) | ||
| 79 | return len(self.run_names_index) - 1 | ||
| 80 | |||
| 81 | return self.run_names_index.index(name) | ||
| 82 | |||
| 83 | def getfn_id(self, name): | ||
| 84 | """ | ||
| 85 | Return an ID number for the filename. | ||
| 86 | If it doesn't exist, create one. | ||
| 87 | """ | ||
| 88 | if not name in self.fn_index: | ||
| 89 | self.fn_index.append(name) | ||
| 90 | return len(self.fn_index) - 1 | ||
| 91 | |||
| 92 | return self.fn_index.index(name) | ||
| 93 | |||
| 94 | def gettask_ids(self, fnid): | ||
| 95 | """ | ||
| 96 | Return an array of the ID numbers matching a given fnid. | ||
| 97 | """ | ||
| 98 | ids = [] | ||
| 99 | if fnid in self.tasks_lookup: | ||
| 100 | for task in self.tasks_lookup[fnid]: | ||
| 101 | ids.append(self.tasks_lookup[fnid][task]) | ||
| 102 | return ids | ||
| 103 | |||
| 104 | def gettask_id(self, fn, task, create = True): | ||
| 105 | """ | ||
| 106 | Return an ID number for the task matching fn and task. | ||
| 107 | If it doesn't exist, create one by default. | ||
| 108 | Optionally return None instead. | ||
| 109 | """ | ||
| 110 | fnid = self.getfn_id(fn) | ||
| 111 | |||
| 112 | if fnid in self.tasks_lookup: | ||
| 113 | if task in self.tasks_lookup[fnid]: | ||
| 114 | return self.tasks_lookup[fnid][task] | ||
| 115 | |||
| 116 | if not create: | ||
| 117 | return None | ||
| 118 | |||
| 119 | self.tasks_name.append(task) | ||
| 120 | self.tasks_fnid.append(fnid) | ||
| 121 | self.tasks_tdepends.append([]) | ||
| 122 | self.tasks_idepends.append([]) | ||
| 123 | |||
| 124 | listid = len(self.tasks_name) - 1 | ||
| 125 | |||
| 126 | if fnid not in self.tasks_lookup: | ||
| 127 | self.tasks_lookup[fnid] = {} | ||
| 128 | self.tasks_lookup[fnid][task] = listid | ||
| 129 | |||
| 130 | return listid | ||
| 131 | |||
| 132 | def add_tasks(self, fn, dataCache): | ||
| 133 | """ | ||
| 134 | Add tasks for a given fn to the database | ||
| 135 | """ | ||
| 136 | |||
| 137 | task_deps = dataCache.task_deps[fn] | ||
| 138 | |||
| 139 | fnid = self.getfn_id(fn) | ||
| 140 | |||
| 141 | if fnid in self.failed_fnids: | ||
| 142 | bb.msg.fatal(bb.msg.domain.TaskData, "Trying to re-add a failed file? Something is broken...") | ||
| 143 | |||
| 144 | # Check if we've already seen this fn | ||
| 145 | if fnid in self.tasks_fnid: | ||
| 146 | return | ||
| 147 | |||
| 148 | for task in task_deps['tasks']: | ||
| 149 | |||
| 150 | # Work out task dependencies | ||
| 151 | parentids = [] | ||
| 152 | for dep in task_deps['parents'][task]: | ||
| 153 | parentid = self.gettask_id(fn, dep) | ||
| 154 | parentids.append(parentid) | ||
| 155 | taskid = self.gettask_id(fn, task) | ||
| 156 | self.tasks_tdepends[taskid].extend(parentids) | ||
| 157 | |||
| 158 | # Touch all intertask dependencies | ||
| 159 | if 'depends' in task_deps and task in task_deps['depends']: | ||
| 160 | ids = [] | ||
| 161 | for dep in task_deps['depends'][task].split(): | ||
| 162 | if dep: | ||
| 163 | ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1])) | ||
| 164 | self.tasks_idepends[taskid].extend(ids) | ||
| 165 | |||
| 166 | # Work out build dependencies | ||
| 167 | if not fnid in self.depids: | ||
| 168 | dependids = {} | ||
| 169 | for depend in dataCache.deps[fn]: | ||
| 170 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn)) | ||
| 171 | dependids[self.getbuild_id(depend)] = None | ||
| 172 | self.depids[fnid] = dependids.keys() | ||
| 173 | |||
| 174 | # Work out runtime dependencies | ||
| 175 | if not fnid in self.rdepids: | ||
| 176 | rdependids = {} | ||
| 177 | rdepends = dataCache.rundeps[fn] | ||
| 178 | rrecs = dataCache.runrecs[fn] | ||
| 179 | for package in rdepends: | ||
| 180 | for rdepend in bb.utils.explode_deps(rdepends[package]): | ||
| 181 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn)) | ||
| 182 | rdependids[self.getrun_id(rdepend)] = None | ||
| 183 | for package in rrecs: | ||
| 184 | for rdepend in bb.utils.explode_deps(rrecs[package]): | ||
| 185 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn)) | ||
| 186 | rdependids[self.getrun_id(rdepend)] = None | ||
| 187 | self.rdepids[fnid] = rdependids.keys() | ||
| 188 | |||
| 189 | for dep in self.depids[fnid]: | ||
| 190 | if dep in self.failed_deps: | ||
| 191 | self.fail_fnid(fnid) | ||
| 192 | return | ||
| 193 | for dep in self.rdepids[fnid]: | ||
| 194 | if dep in self.failed_rdeps: | ||
| 195 | self.fail_fnid(fnid) | ||
| 196 | return | ||
| 197 | |||
| 198 | def have_build_target(self, target): | ||
| 199 | """ | ||
| 200 | Have we a build target matching this name? | ||
| 201 | """ | ||
| 202 | targetid = self.getbuild_id(target) | ||
| 203 | |||
| 204 | if targetid in self.build_targets: | ||
| 205 | return True | ||
| 206 | return False | ||
| 207 | |||
| 208 | def have_runtime_target(self, target): | ||
| 209 | """ | ||
| 210 | Have we a runtime target matching this name? | ||
| 211 | """ | ||
| 212 | targetid = self.getrun_id(target) | ||
| 213 | |||
| 214 | if targetid in self.run_targets: | ||
| 215 | return True | ||
| 216 | return False | ||
| 217 | |||
| 218 | def add_build_target(self, fn, item): | ||
| 219 | """ | ||
| 220 | Add a build target. | ||
| 221 | If already present, append the provider fn to the list | ||
| 222 | """ | ||
| 223 | targetid = self.getbuild_id(item) | ||
| 224 | fnid = self.getfn_id(fn) | ||
| 225 | |||
| 226 | if targetid in self.build_targets: | ||
| 227 | if fnid in self.build_targets[targetid]: | ||
| 228 | return | ||
| 229 | self.build_targets[targetid].append(fnid) | ||
| 230 | return | ||
| 231 | self.build_targets[targetid] = [fnid] | ||
| 232 | |||
| 233 | def add_runtime_target(self, fn, item): | ||
| 234 | """ | ||
| 235 | Add a runtime target. | ||
| 236 | If already present, append the provider fn to the list | ||
| 237 | """ | ||
| 238 | targetid = self.getrun_id(item) | ||
| 239 | fnid = self.getfn_id(fn) | ||
| 240 | |||
| 241 | if targetid in self.run_targets: | ||
| 242 | if fnid in self.run_targets[targetid]: | ||
| 243 | return | ||
| 244 | self.run_targets[targetid].append(fnid) | ||
| 245 | return | ||
| 246 | self.run_targets[targetid] = [fnid] | ||
| 247 | |||
| 248 | def mark_external_target(self, item): | ||
| 249 | """ | ||
| 250 | Mark a build target as being externally requested | ||
| 251 | """ | ||
| 252 | targetid = self.getbuild_id(item) | ||
| 253 | |||
| 254 | if targetid not in self.external_targets: | ||
| 255 | self.external_targets.append(targetid) | ||
| 256 | |||
| 257 | def get_unresolved_build_targets(self, dataCache): | ||
| 258 | """ | ||
| 259 | Return a list of build targets who's providers | ||
| 260 | are unknown. | ||
| 261 | """ | ||
| 262 | unresolved = [] | ||
| 263 | for target in self.build_names_index: | ||
| 264 | if target in dataCache.ignored_dependencies: | ||
| 265 | continue | ||
| 266 | if self.build_names_index.index(target) in self.failed_deps: | ||
| 267 | continue | ||
| 268 | if not self.have_build_target(target): | ||
| 269 | unresolved.append(target) | ||
| 270 | return unresolved | ||
| 271 | |||
| 272 | def get_unresolved_run_targets(self, dataCache): | ||
| 273 | """ | ||
| 274 | Return a list of runtime targets who's providers | ||
| 275 | are unknown. | ||
| 276 | """ | ||
| 277 | unresolved = [] | ||
| 278 | for target in self.run_names_index: | ||
| 279 | if target in dataCache.ignored_dependencies: | ||
| 280 | continue | ||
| 281 | if self.run_names_index.index(target) in self.failed_rdeps: | ||
| 282 | continue | ||
| 283 | if not self.have_runtime_target(target): | ||
| 284 | unresolved.append(target) | ||
| 285 | return unresolved | ||
| 286 | |||
| 287 | def get_provider(self, item): | ||
| 288 | """ | ||
| 289 | Return a list of providers of item | ||
| 290 | """ | ||
| 291 | targetid = self.getbuild_id(item) | ||
| 292 | |||
| 293 | return self.build_targets[targetid] | ||
| 294 | |||
| 295 | def get_dependees(self, itemid): | ||
| 296 | """ | ||
| 297 | Return a list of targets which depend on item | ||
| 298 | """ | ||
| 299 | dependees = [] | ||
| 300 | for fnid in self.depids: | ||
| 301 | if itemid in self.depids[fnid]: | ||
| 302 | dependees.append(fnid) | ||
| 303 | return dependees | ||
| 304 | |||
| 305 | def get_dependees_str(self, item): | ||
| 306 | """ | ||
| 307 | Return a list of targets which depend on item as a user readable string | ||
| 308 | """ | ||
| 309 | itemid = self.getbuild_id(item) | ||
| 310 | dependees = [] | ||
| 311 | for fnid in self.depids: | ||
| 312 | if itemid in self.depids[fnid]: | ||
| 313 | dependees.append(self.fn_index[fnid]) | ||
| 314 | return dependees | ||
| 315 | |||
| 316 | def get_rdependees(self, itemid): | ||
| 317 | """ | ||
| 318 | Return a list of targets which depend on runtime item | ||
| 319 | """ | ||
| 320 | dependees = [] | ||
| 321 | for fnid in self.rdepids: | ||
| 322 | if itemid in self.rdepids[fnid]: | ||
| 323 | dependees.append(fnid) | ||
| 324 | return dependees | ||
| 325 | |||
| 326 | def get_rdependees_str(self, item): | ||
| 327 | """ | ||
| 328 | Return a list of targets which depend on runtime item as a user readable string | ||
| 329 | """ | ||
| 330 | itemid = self.getrun_id(item) | ||
| 331 | dependees = [] | ||
| 332 | for fnid in self.rdepids: | ||
| 333 | if itemid in self.rdepids[fnid]: | ||
| 334 | dependees.append(self.fn_index[fnid]) | ||
| 335 | return dependees | ||
| 336 | |||
| 337 | def add_provider(self, cfgData, dataCache, item): | ||
| 338 | try: | ||
| 339 | self.add_provider_internal(cfgData, dataCache, item) | ||
| 340 | except bb.providers.NoProvider: | ||
| 341 | if self.abort: | ||
| 342 | bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item))) | ||
| 343 | raise | ||
| 344 | targetid = self.getbuild_id(item) | ||
| 345 | self.remove_buildtarget(targetid) | ||
| 346 | |||
| 347 | self.mark_external_target(item) | ||
| 348 | |||
| 349 | def add_provider_internal(self, cfgData, dataCache, item): | ||
| 350 | """ | ||
| 351 | Add the providers of item to the task data | ||
| 352 | Mark entries were specifically added externally as against dependencies | ||
| 353 | added internally during dependency resolution | ||
| 354 | """ | ||
| 355 | |||
| 356 | if item in dataCache.ignored_dependencies: | ||
| 357 | return | ||
| 358 | |||
| 359 | if not item in dataCache.providers: | ||
| 360 | bb.msg.note(2, bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item))) | ||
| 361 | bb.event.fire(bb.event.NoProvider(item, cfgData)) | ||
| 362 | raise bb.providers.NoProvider(item) | ||
| 363 | |||
| 364 | if self.have_build_target(item): | ||
| 365 | return | ||
| 366 | |||
| 367 | all_p = dataCache.providers[item] | ||
| 368 | |||
| 369 | eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache) | ||
| 370 | |||
| 371 | for p in eligible: | ||
| 372 | fnid = self.getfn_id(p) | ||
| 373 | if fnid in self.failed_fnids: | ||
| 374 | eligible.remove(p) | ||
| 375 | |||
| 376 | if not eligible: | ||
| 377 | bb.msg.note(2, bb.msg.domain.Provider, "No buildable provider PROVIDES '%s' but '%s' DEPENDS on or otherwise requires it. Enable debugging and see earlier logs to find unbuildable providers." % (item, self.get_dependees_str(item))) | ||
| 378 | bb.event.fire(bb.event.NoProvider(item, cfgData)) | ||
| 379 | raise bb.providers.NoProvider(item) | ||
| 380 | |||
| 381 | if len(eligible) > 1 and foundUnique == False: | ||
| 382 | if item not in self.consider_msgs_cache: | ||
| 383 | providers_list = [] | ||
| 384 | for fn in eligible: | ||
| 385 | providers_list.append(dataCache.pkg_fn[fn]) | ||
| 386 | bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available for %s (%s);" % (item, ", ".join(providers_list))) | ||
| 387 | bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item) | ||
| 388 | bb.event.fire(bb.event.MultipleProviders(item, providers_list, cfgData)) | ||
| 389 | self.consider_msgs_cache.append(item) | ||
| 390 | |||
| 391 | for fn in eligible: | ||
| 392 | fnid = self.getfn_id(fn) | ||
| 393 | if fnid in self.failed_fnids: | ||
| 394 | continue | ||
| 395 | bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item)) | ||
| 396 | self.add_build_target(fn, item) | ||
| 397 | self.add_tasks(fn, dataCache) | ||
| 398 | |||
| 399 | |||
| 400 | #item = dataCache.pkg_fn[fn] | ||
| 401 | |||
| 402 | def add_rprovider(self, cfgData, dataCache, item): | ||
| 403 | """ | ||
| 404 | Add the runtime providers of item to the task data | ||
| 405 | (takes item names from RDEPENDS/PACKAGES namespace) | ||
| 406 | """ | ||
| 407 | |||
| 408 | if item in dataCache.ignored_dependencies: | ||
| 409 | return | ||
| 410 | |||
| 411 | if self.have_runtime_target(item): | ||
| 412 | return | ||
| 413 | |||
| 414 | all_p = bb.providers.getRuntimeProviders(dataCache, item) | ||
| 415 | |||
| 416 | if not all_p: | ||
| 417 | bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables" % (self.get_rdependees_str(item), item)) | ||
| 418 | bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True)) | ||
| 419 | raise bb.providers.NoRProvider(item) | ||
| 420 | |||
| 421 | eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache) | ||
| 422 | |||
| 423 | for p in eligible: | ||
| 424 | fnid = self.getfn_id(p) | ||
| 425 | if fnid in self.failed_fnids: | ||
| 426 | eligible.remove(p) | ||
| 427 | |||
| 428 | if not eligible: | ||
| 429 | bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables of any buildable targets.\nEnable debugging and see earlier logs to find unbuildable targets." % (self.get_rdependees_str(item), item)) | ||
| 430 | bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True)) | ||
| 431 | raise bb.providers.NoRProvider(item) | ||
| 432 | |||
| 433 | if len(eligible) > 1 and numberPreferred == 0: | ||
| 434 | if item not in self.consider_msgs_cache: | ||
| 435 | providers_list = [] | ||
| 436 | for fn in eligible: | ||
| 437 | providers_list.append(dataCache.pkg_fn[fn]) | ||
| 438 | bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list))) | ||
| 439 | bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item) | ||
| 440 | bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True)) | ||
| 441 | self.consider_msgs_cache.append(item) | ||
| 442 | |||
| 443 | if numberPreferred > 1: | ||
| 444 | if item not in self.consider_msgs_cache: | ||
| 445 | providers_list = [] | ||
| 446 | for fn in eligible: | ||
| 447 | providers_list.append(dataCache.pkg_fn[fn]) | ||
| 448 | bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list))) | ||
| 449 | bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item) | ||
| 450 | bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True)) | ||
| 451 | self.consider_msgs_cache.append(item) | ||
| 452 | |||
| 453 | # run through the list until we find one that we can build | ||
| 454 | for fn in eligible: | ||
| 455 | fnid = self.getfn_id(fn) | ||
| 456 | if fnid in self.failed_fnids: | ||
| 457 | continue | ||
| 458 | bb.msg.debug(2, bb.msg.domain.Provider, "adding '%s' to satisfy runtime '%s'" % (fn, item)) | ||
| 459 | self.add_runtime_target(fn, item) | ||
| 460 | self.add_tasks(fn, dataCache) | ||
| 461 | |||
| 462 | def fail_fnid(self, fnid, missing_list = []): | ||
| 463 | """ | ||
| 464 | Mark a file as failed (unbuildable) | ||
| 465 | Remove any references from build and runtime provider lists | ||
| 466 | |||
| 467 | missing_list, A list of missing requirements for this target | ||
| 468 | """ | ||
| 469 | if fnid in self.failed_fnids: | ||
| 470 | return | ||
| 471 | bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." % self.fn_index[fnid]) | ||
| 472 | self.failed_fnids.append(fnid) | ||
| 473 | for target in self.build_targets: | ||
| 474 | if fnid in self.build_targets[target]: | ||
| 475 | self.build_targets[target].remove(fnid) | ||
| 476 | if len(self.build_targets[target]) == 0: | ||
| 477 | self.remove_buildtarget(target, missing_list) | ||
| 478 | for target in self.run_targets: | ||
| 479 | if fnid in self.run_targets[target]: | ||
| 480 | self.run_targets[target].remove(fnid) | ||
| 481 | if len(self.run_targets[target]) == 0: | ||
| 482 | self.remove_runtarget(target, missing_list) | ||
| 483 | |||
| 484 | def remove_buildtarget(self, targetid, missing_list = []): | ||
| 485 | """ | ||
| 486 | Mark a build target as failed (unbuildable) | ||
| 487 | Trigger removal of any files that have this as a dependency | ||
| 488 | """ | ||
| 489 | if not missing_list: | ||
| 490 | missing_list = [self.build_names_index[targetid]] | ||
| 491 | else: | ||
| 492 | missing_list = [self.build_names_index[targetid]] + missing_list | ||
| 493 | bb.msg.note(2, bb.msg.domain.Provider, "Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list)) | ||
| 494 | self.failed_deps.append(targetid) | ||
| 495 | dependees = self.get_dependees(targetid) | ||
| 496 | for fnid in dependees: | ||
| 497 | self.fail_fnid(fnid, missing_list) | ||
| 498 | for taskid in range(len(self.tasks_idepends)): | ||
| 499 | idepends = self.tasks_idepends[taskid] | ||
| 500 | for (idependid, idependtask) in idepends: | ||
| 501 | if idependid == targetid: | ||
| 502 | self.fail_fnid(self.tasks_fnid[taskid], missing_list) | ||
| 503 | |||
| 504 | if self.abort and targetid in self.external_targets: | ||
| 505 | bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list)) | ||
| 506 | raise bb.providers.NoProvider | ||
| 507 | |||
| 508 | def remove_runtarget(self, targetid, missing_list = []): | ||
| 509 | """ | ||
| 510 | Mark a run target as failed (unbuildable) | ||
| 511 | Trigger removal of any files that have this as a dependency | ||
| 512 | """ | ||
| 513 | if not missing_list: | ||
| 514 | missing_list = [self.run_names_index[targetid]] | ||
| 515 | else: | ||
| 516 | missing_list = [self.run_names_index[targetid]] + missing_list | ||
| 517 | |||
| 518 | bb.msg.note(1, bb.msg.domain.Provider, "Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.run_names_index[targetid], missing_list)) | ||
| 519 | self.failed_rdeps.append(targetid) | ||
| 520 | dependees = self.get_rdependees(targetid) | ||
| 521 | for fnid in dependees: | ||
| 522 | self.fail_fnid(fnid, missing_list) | ||
| 523 | |||
| 524 | def add_unresolved(self, cfgData, dataCache): | ||
| 525 | """ | ||
| 526 | Resolve all unresolved build and runtime targets | ||
| 527 | """ | ||
| 528 | bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") | ||
| 529 | while 1: | ||
| 530 | added = 0 | ||
| 531 | for target in self.get_unresolved_build_targets(dataCache): | ||
| 532 | try: | ||
| 533 | self.add_provider_internal(cfgData, dataCache, target) | ||
| 534 | added = added + 1 | ||
| 535 | except bb.providers.NoProvider: | ||
| 536 | targetid = self.getbuild_id(target) | ||
| 537 | if self.abort and targetid in self.external_targets: | ||
| 538 | bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (target, self.get_dependees_str(target))) | ||
| 539 | raise | ||
| 540 | self.remove_buildtarget(targetid) | ||
| 541 | for target in self.get_unresolved_run_targets(dataCache): | ||
| 542 | try: | ||
| 543 | self.add_rprovider(cfgData, dataCache, target) | ||
| 544 | added = added + 1 | ||
| 545 | except bb.providers.NoRProvider: | ||
| 546 | self.remove_runtarget(self.getrun_id(target)) | ||
| 547 | bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependecies") | ||
| 548 | if added == 0: | ||
| 549 | break | ||
| 550 | # self.dump_data() | ||
| 551 | |||
| 552 | def dump_data(self): | ||
| 553 | """ | ||
| 554 | Dump some debug information on the internal data structures | ||
| 555 | """ | ||
| 556 | bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:") | ||
| 557 | bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index)) | ||
| 558 | |||
| 559 | bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:") | ||
| 560 | bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index)) | ||
| 561 | |||
| 562 | bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:") | ||
| 563 | for buildid in range(len(self.build_names_index)): | ||
| 564 | target = self.build_names_index[buildid] | ||
| 565 | targets = "None" | ||
| 566 | if buildid in self.build_targets: | ||
| 567 | targets = self.build_targets[buildid] | ||
| 568 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets)) | ||
| 569 | |||
| 570 | bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:") | ||
| 571 | for runid in range(len(self.run_names_index)): | ||
| 572 | target = self.run_names_index[runid] | ||
| 573 | targets = "None" | ||
| 574 | if runid in self.run_targets: | ||
| 575 | targets = self.run_targets[runid] | ||
| 576 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets)) | ||
| 577 | |||
| 578 | bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:") | ||
| 579 | for task in range(len(self.tasks_name)): | ||
| 580 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % ( | ||
| 581 | task, | ||
| 582 | self.fn_index[self.tasks_fnid[task]], | ||
| 583 | self.tasks_name[task], | ||
| 584 | self.tasks_tdepends[task])) | ||
| 585 | |||
| 586 | bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):") | ||
| 587 | for fnid in self.depids: | ||
| 588 | bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.depids[fnid])) | ||
| 589 | |||
| 590 | bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):") | ||
| 591 | for fnid in self.rdepids: | ||
| 592 | bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid])) | ||
| 593 | |||
| 594 | |||
diff --git a/bitbake-dev/lib/bb/ui/__init__.py b/bitbake-dev/lib/bb/ui/__init__.py new file mode 100644 index 0000000000..c6a377a8e6 --- /dev/null +++ b/bitbake-dev/lib/bb/ui/__init__.py | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | # | ||
| 2 | # BitBake UI Implementation | ||
| 3 | # | ||
| 4 | # Copyright (C) 2006-2007 Richard Purdie | ||
| 5 | # | ||
| 6 | # This program is free software; you can redistribute it and/or modify | ||
| 7 | # it under the terms of the GNU General Public License version 2 as | ||
| 8 | # published by the Free Software Foundation. | ||
| 9 | # | ||
| 10 | # This program is distributed in the hope that it will be useful, | ||
| 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | # GNU General Public License for more details. | ||
| 14 | # | ||
| 15 | # You should have received a copy of the GNU General Public License along | ||
| 16 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | |||
diff --git a/bitbake-dev/lib/bb/ui/depexplorer.py b/bitbake-dev/lib/bb/ui/depexplorer.py new file mode 100644 index 0000000000..becbb5dd5d --- /dev/null +++ b/bitbake-dev/lib/bb/ui/depexplorer.py | |||
| @@ -0,0 +1,271 @@ | |||
| 1 | # | ||
| 2 | # BitBake Graphical GTK based Dependency Explorer | ||
| 3 | # | ||
| 4 | # Copyright (C) 2007 Ross Burton | ||
| 5 | # Copyright (C) 2007 - 2008 Richard Purdie | ||
| 6 | # | ||
| 7 | # This program is free software; you can redistribute it and/or modify | ||
| 8 | # it under the terms of the GNU General Public License version 2 as | ||
| 9 | # published by the Free Software Foundation. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, | ||
| 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | # GNU General Public License for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License along | ||
| 17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | |||
| 20 | import gobject | ||
| 21 | import gtk | ||
| 22 | import threading | ||
| 23 | |||
| 24 | # Package Model | ||
| 25 | (COL_PKG_NAME) = (0) | ||
| 26 | |||
| 27 | # Dependency Model | ||
| 28 | (TYPE_DEP, TYPE_RDEP) = (0, 1) | ||
| 29 | (COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2) | ||
| 30 | |||
class PackageDepView(gtk.TreeView):
    """
    Tree view showing the dependencies (of one dependency type) of the
    package currently selected in the master package list, backed by a
    filtered view of the shared depends model.
    """
    def __init__(self, model, dep_type, label):
        gtk.TreeView.__init__(self)
        self.current = None
        self.dep_type = dep_type
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter)
        self.set_model(self.filter_model)
        column = gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PACKAGE)
        self.append_column(column)

    def _filter(self, model, iter):
        # Visible when the row matches both our dependency type and the
        # currently selected package
        (this_type, package) = model.get(iter, COL_DEP_TYPE, COL_DEP_PARENT)
        return this_type == self.dep_type and package == self.current

    def set_current_package(self, package):
        """Switch the view to show dependencies of *package*."""
        self.current = package
        self.filter_model.refilter()
| 50 | |||
class PackageReverseDepView(gtk.TreeView):
    """
    Tree view showing every package that depends on the currently
    selected package (reverse dependencies), backed by a filtered view
    of the shared depends model.
    """
    def __init__(self, model, label):
        gtk.TreeView.__init__(self)
        self.current = None
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter)
        self.set_model(self.filter_model)
        column = gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PARENT)
        self.append_column(column)

    def _filter(self, model, iter):
        # Visible when this row's dependency target is the selected package
        return model.get_value(iter, COL_DEP_PACKAGE) == self.current

    def set_current_package(self, package):
        """Switch the view to show reverse dependencies of *package*."""
        self.current = package
        self.filter_model.refilter()
| 67 | |||
class DepExplorer(gtk.Window):
    """
    Main dependency explorer window: master package list on the left,
    runtime/build/reverse dependency panes on the right, all driven by
    two shared list models (pkg_model and depends_model).
    """
    def __init__(self):
        gtk.Window.__init__(self)
        self.set_title("Dependency Explorer")
        self.set_default_size(500, 500)
        self.connect("delete-event", gtk.main_quit)

        # Create the data models
        self.pkg_model = gtk.ListStore(gobject.TYPE_STRING)
        self.depends_model = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING)

        pane = gtk.HPaned()
        pane.set_position(250)
        self.add(pane)

        # The master list of packages
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.set_shadow_type(gtk.SHADOW_IN)
        self.pkg_treeview = gtk.TreeView(self.pkg_model)
        self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
        self.pkg_treeview.append_column(gtk.TreeViewColumn("Package", gtk.CellRendererText(), text=COL_PKG_NAME))
        pane.add1(scrolled)
        scrolled.add(self.pkg_treeview)

        box = gtk.VBox(homogeneous=True, spacing=4)

        # Runtime Depends
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.set_shadow_type(gtk.SHADOW_IN)
        self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
        self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
        scrolled.add(self.rdep_treeview)
        box.add(scrolled)

        # Build Depends
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.set_shadow_type(gtk.SHADOW_IN)
        self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
        self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
        scrolled.add(self.dep_treeview)
        box.add(scrolled)

        # Reverse Depends
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.set_shadow_type(gtk.SHADOW_IN)
        self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
        self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
        scrolled.add(self.revdep_treeview)
        box.add(scrolled)

        # BUGFIX: the box was previously added to the pane twice (once
        # after the build-depends pane and once here), which makes GTK
        # complain about re-adding a widget that already has a parent.
        # Add it exactly once, after all three panes are packed.
        pane.add2(box)

        self.show_all()

    def on_package_activated(self, treeview, path, column, data_col):
        """Jump the master list selection to the package double-clicked
        in one of the dependency panes."""
        model = treeview.get_model()
        package = model.get_value(model.get_iter(path), data_col)

        pkg_path = []
        def finder(model, path, iter, needle):
            # foreach callback: stop (return True) once the package is found
            package = model.get_value(iter, COL_PKG_NAME)
            if package == needle:
                pkg_path.append(path)
                return True
            else:
                return False
        self.pkg_model.foreach(finder, package)
        if pkg_path:
            self.pkg_treeview.get_selection().select_path(pkg_path[0])
            self.pkg_treeview.scroll_to_cell(pkg_path[0])

    def on_cursor_changed(self, selection):
        """Refilter all three dependency panes for the newly selected package."""
        (model, it) = selection.get_selected()
        # BUGFIX: the original tested the *builtin* 'iter' (always truthy)
        # instead of the unpacked iterator 'it', so an empty selection
        # would crash in get_value() below.
        if it is None:
            current_package = None
        else:
            current_package = model.get_value(it, COL_PKG_NAME)
        self.rdep_treeview.set_current_package(current_package)
        self.dep_treeview.set_current_package(current_package)
        self.revdep_treeview.set_current_package(current_package)
| 152 | |||
| 153 | |||
def parse(depgraph, pkg_model, depends_model):
    """
    Populate the package and dependency list models from a depgraph
    dictionary ("pn", "depends" and "rdepends-pn" keys).
    """

    # One row per recipe name
    for pn in depgraph["pn"]:
        pkg_model.set(pkg_model.append(), COL_PKG_NAME, pn)

    # Build-time dependency edges
    for pn in depgraph["depends"]:
        for dep in depgraph["depends"][pn]:
            depends_model.set(depends_model.append(),
                              COL_DEP_TYPE, TYPE_DEP,
                              COL_DEP_PARENT, pn,
                              COL_DEP_PACKAGE, dep)

    # Runtime dependency edges
    for pn in depgraph["rdepends-pn"]:
        for rdep in depgraph["rdepends-pn"][pn]:
            depends_model.set(depends_model.append(),
                              COL_DEP_TYPE, TYPE_RDEP,
                              COL_DEP_PARENT, pn,
                              COL_DEP_PACKAGE, rdep)
| 172 | |||
class ProgressBar(gtk.Window):
    # Small top-level window holding a single gtk.ProgressBar, shown while
    # BitBake parses its recipe files.
    def __init__(self):

        gtk.Window.__init__(self)
        self.set_title("Parsing .bb files, please wait...")
        self.set_default_size(500, 0)
        # Closing this window shuts down the whole gtk main loop.
        self.connect("delete-event", gtk.main_quit)

        self.progress = gtk.ProgressBar()
        self.add(self.progress)
        self.show_all()
| 184 | |||
class gtkthread(threading.Thread):
    # Class-level flag set once the gtk main loop exits, so the event loop
    # in init() can notice the UI has gone away.
    quit = threading.Event()
    def __init__(self, shutdown):
        threading.Thread.__init__(self)
        # Daemon thread: never keep the process alive on its own.
        self.setDaemon(True)
        self.shutdown = shutdown  # NOTE(review): stored but not read here — confirm it is needed

    def run(self):
        # Enable threading in gobject/gdk before entering the main loop so
        # the event loop can safely use threads_enter()/threads_leave().
        gobject.threads_init()
        gtk.gdk.threads_init()
        gtk.main()
        gtkthread.quit.set()
| 197 | |||
def init(server, eventHandler):
    """Entry point for the dependency-explorer UI.

    Asks the server to generate a dependency tree (only valid for the -g
    command line action), starts the gtk main loop on its own thread, and
    then pumps server events until the build or the UI finishes.
    """

    try:
        cmdline = server.runCommand(["getCmdLineAction"])
        # This UI only makes sense for dependency-graph generation.
        if not cmdline or cmdline[0] != "generateDotGraph":
            print "This UI is only compatible with the -g option"
            return
        ret = server.runCommand(["generateDepTreeEvent", cmdline[1]])
        if ret != True:
            print "Couldn't run command! %s" % ret
            return
    except xmlrpclib.Fault, x:
        print "XMLRPC Fault getting commandline:\n %s" % x
        return

    shutdown = 0

    gtkgui = gtkthread(shutdown)
    gtkgui.start()

    # All gtk widget work must happen between threads_enter/threads_leave
    # because the gtk main loop runs on the gtkthread above.
    gtk.gdk.threads_enter()
    pbar = ProgressBar()
    dep = DepExplorer()
    gtk.gdk.threads_leave()

    while True:
        try:
            event = eventHandler.waitEvent(0.25)
            # Stop pumping events once the gtk main loop has exited.
            if gtkthread.quit.isSet():
                break

            if event is None:
                continue
            if event[0].startswith('bb.event.ParseProgress'):
                x = event[1]['sofar']
                y = event[1]['total']
                if x == y:
                    print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors."
                        % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'], event[1]['errors']))
                    pbar.hide()
                gtk.gdk.threads_enter()
                pbar.progress.set_fraction(float(x)/float(y))
                pbar.progress.set_text("%d/%d (%2d %%)" % (x, y, x*100/y))
                gtk.gdk.threads_leave()
                continue

            if event[0] == "bb.event.DepTreeGenerated":
                # Populate the explorer's models from the generated graph.
                gtk.gdk.threads_enter()
                parse(event[1]['_depgraph'], dep.pkg_model, dep.depends_model)
                gtk.gdk.threads_leave()

            if event[0] == 'bb.command.CookerCommandCompleted':
                continue
            if event[0] == 'bb.command.CookerCommandFailed':
                print "Command execution failed: %s" % event[1]['error']
                break
            if event[0] == 'bb.cooker.CookerExit':
                break

            continue

        except KeyboardInterrupt:
            # Escalate on repeated Ctrl-C: shutdown -> stop -> hard exit.
            if shutdown == 2:
                print "\nThird Keyboard Interrupt, exit.\n"
                break
            if shutdown == 1:
                print "\nSecond Keyboard Interrupt, stopping...\n"
                server.runCommand(["stateStop"])
            if shutdown == 0:
                print "\nKeyboard Interrupt, closing down...\n"
                server.runCommand(["stateShutdown"])
            shutdown = shutdown + 1
            pass
| 271 | |||
diff --git a/bitbake-dev/lib/bb/ui/knotty.py b/bitbake-dev/lib/bb/ui/knotty.py new file mode 100644 index 0000000000..9e89660307 --- /dev/null +++ b/bitbake-dev/lib/bb/ui/knotty.py | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | # | ||
| 2 | # BitBake (No)TTY UI Implementation | ||
| 3 | # | ||
| 4 | # Handling output to TTYs or files (no TTY) | ||
| 5 | # | ||
| 6 | # Copyright (C) 2006-2007 Richard Purdie | ||
| 7 | # | ||
| 8 | # This program is free software; you can redistribute it and/or modify | ||
| 9 | # it under the terms of the GNU General Public License version 2 as | ||
| 10 | # published by the Free Software Foundation. | ||
| 11 | # | ||
| 12 | # This program is distributed in the hope that it will be useful, | ||
| 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | # GNU General Public License for more details. | ||
| 16 | # | ||
| 17 | # You should have received a copy of the GNU General Public License along | ||
| 18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 20 | |||
| 21 | import os | ||
| 22 | import bb | ||
| 23 | from bb import cooker | ||
| 24 | |||
| 25 | import sys | ||
| 26 | import time | ||
| 27 | import itertools | ||
| 28 | import xmlrpclib | ||
| 29 | |||
| 30 | parsespin = itertools.cycle( r'|/-\\' ) | ||
| 31 | |||
def init(server, eventHandler):
    """Entry point for the plain-text (knotty) UI.

    Runs the default command line action on the server, then prints
    server events until the command completes. Returns 0 on success,
    1 on any error.
    """

    # Get values of variables which control our output
    includelogs = server.runCommand(["readVariable", "BBINCLUDELOGS"])
    loglines = server.runCommand(["readVariable", "BBINCLUDELOGS_LINES"])

    try:
        cmdline = server.runCommand(["getCmdLineAction"])
        #print cmdline
        if not cmdline:
            return 1
        ret = server.runCommand(cmdline)
        if ret != True:
            print "Couldn't get default commandline! %s" % ret
            return 1
    except xmlrpclib.Fault, x:
        print "XMLRPC Fault getting commandline:\n %s" % x
        return 1

    # Counts consecutive Ctrl-C presses; see the KeyboardInterrupt handler.
    shutdown = 0
    return_value = 0
    while True:
        try:
            event = eventHandler.waitEvent(0.25)
            if event is None:
                continue
            #print event
            if event[0].startswith('bb.event.Pkg'):
                print "NOTE: %s" % event[1]['_message']
                continue
            if event[0].startswith('bb.msg.MsgPlain'):
                print event[1]['_message']
                continue
            if event[0].startswith('bb.msg.MsgDebug'):
                print 'DEBUG: ' + event[1]['_message']
                continue
            if event[0].startswith('bb.msg.MsgNote'):
                print 'NOTE: ' + event[1]['_message']
                continue
            if event[0].startswith('bb.msg.MsgWarn'):
                print 'WARNING: ' + event[1]['_message']
                continue
            if event[0].startswith('bb.msg.MsgError'):
                return_value = 1
                print 'ERROR: ' + event[1]['_message']
                continue
            if event[0].startswith('bb.build.TaskFailed'):
                return_value = 1
                logfile = event[1]['logfile']
                if logfile:
                    print "ERROR: Logfile of failure stored in %s." % logfile
                    if includelogs:
                        print "Log data follows:"
                        f = open(logfile, "r")
                        lines = []
                        while True:
                            l = f.readline()
                            if l == '':
                                break
                            l = l.rstrip()
                            if loglines:
                                # Keep only the last BBINCLUDELOGS_LINES lines.
                                lines.append(' | %s' % l)
                                if len(lines) > int(loglines):
                                    lines.pop(0)
                            else:
                                print '| %s' % l
                        f.close()
                        if lines:
                            for line in lines:
                                print line
            # No 'continue' above: a TaskFailed event also matches this
            # prefix, so its message still gets printed below.
            if event[0].startswith('bb.build.Task'):
                print "NOTE: %s" % event[1]['_message']
                continue
            if event[0].startswith('bb.event.ParseProgress'):
                x = event[1]['sofar']
                y = event[1]['total']
                # Spinner on a real terminal; plain text when redirected.
                if os.isatty(sys.stdout.fileno()):
                    sys.stdout.write("\rNOTE: Handling BitBake files: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) )
                    sys.stdout.flush()
                else:
                    if x == 1:
                        sys.stdout.write("Parsing .bb files, please wait...")
                        sys.stdout.flush()
                    if x == y:
                        sys.stdout.write("done.")
                        sys.stdout.flush()
                if x == y:
                    print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors."
                        % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'], event[1]['errors']))
                continue

            if event[0] == 'bb.command.CookerCommandCompleted':
                break
            if event[0] == 'bb.command.CookerCommandFailed':
                return_value = 1
                print "Command execution failed: %s" % event[1]['error']
                break
            if event[0] == 'bb.cooker.CookerExit':
                break

            # ignore
            if event[0].startswith('bb.event.BuildStarted'):
                continue
            if event[0].startswith('bb.event.BuildCompleted'):
                continue
            if event[0].startswith('bb.event.MultipleProviders'):
                continue
            if event[0].startswith('bb.runqueue.runQueue'):
                continue
            if event[0].startswith('bb.event.StampUpdate'):
                continue
            print "Unknown Event: %s" % event

        except KeyboardInterrupt:
            # Escalate on repeated Ctrl-C: shutdown -> stop -> hard exit.
            if shutdown == 2:
                print "\nThird Keyboard Interrupt, exit.\n"
                break
            if shutdown == 1:
                print "\nSecond Keyboard Interrupt, stopping...\n"
                server.runCommand(["stateStop"])
            if shutdown == 0:
                print "\nKeyboard Interrupt, closing down...\n"
                server.runCommand(["stateShutdown"])
            shutdown = shutdown + 1
            pass
    return return_value
diff --git a/bitbake-dev/lib/bb/ui/ncurses.py b/bitbake-dev/lib/bb/ui/ncurses.py new file mode 100644 index 0000000000..1476baa61f --- /dev/null +++ b/bitbake-dev/lib/bb/ui/ncurses.py | |||
| @@ -0,0 +1,333 @@ | |||
| 1 | # | ||
| 2 | # BitBake Curses UI Implementation | ||
| 3 | # | ||
| 4 | # Implements an ncurses frontend for the BitBake utility. | ||
| 5 | # | ||
| 6 | # Copyright (C) 2006 Michael 'Mickey' Lauer | ||
| 7 | # Copyright (C) 2006-2007 Richard Purdie | ||
| 8 | # | ||
| 9 | # This program is free software; you can redistribute it and/or modify | ||
| 10 | # it under the terms of the GNU General Public License version 2 as | ||
| 11 | # published by the Free Software Foundation. | ||
| 12 | # | ||
| 13 | # This program is distributed in the hope that it will be useful, | ||
| 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | # GNU General Public License for more details. | ||
| 17 | # | ||
| 18 | # You should have received a copy of the GNU General Public License along | ||
| 19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 21 | |||
| 22 | """ | ||
| 23 | We have the following windows: | ||
| 24 | |||
| 25 | 1.) Main Window: Shows what we are ultimately building and how far we are. Includes status bar | ||
| 26 | 2.) Thread Activity Window: Shows one status line for every concurrent bitbake thread. | ||
| 27 | 3.) Command Line Window: Contains an interactive command line where you can interact w/ Bitbake. | ||
| 28 | |||
| 29 | Basic window layout is like that: | ||
| 30 | |||
| 31 | |---------------------------------------------------------| | ||
| 32 | | <Main Window> | <Thread Activity Window> | | ||
| 33 | | | 0: foo do_compile complete| | ||
| 34 | | Building Gtk+-2.6.10 | 1: bar do_patch complete | | ||
| 35 | | Status: 60% | ... | | ||
| 36 | | | ... | | ||
| 37 | | | ... | | ||
| 38 | |---------------------------------------------------------| | ||
| 39 | |<Command Line Window> | | ||
| 40 | |>>> which virtual/kernel | | ||
| 41 | |openzaurus-kernel | | ||
| 42 | |>>> _ | | ||
| 43 | |---------------------------------------------------------| | ||
| 44 | |||
| 45 | """ | ||
| 46 | |||
import os, sys, curses, time, random, threading, itertools, time
import xmlrpclib
from curses.textpad import Textbox

import bb
from bb import ui
from bb.ui import uihelper
| 52 | |||
| 53 | parsespin = itertools.cycle( r'|/-\\' ) | ||
| 54 | |||
| 55 | X = 0 | ||
| 56 | Y = 1 | ||
| 57 | WIDTH = 2 | ||
| 58 | HEIGHT = 3 | ||
| 59 | |||
| 60 | MAXSTATUSLENGTH = 32 | ||
| 61 | |||
| 62 | class NCursesUI: | ||
| 63 | """ | ||
| 64 | NCurses UI Class | ||
| 65 | """ | ||
| 66 | class Window: | ||
| 67 | """Base Window Class""" | ||
| 68 | def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): | ||
| 69 | self.win = curses.newwin( height, width, y, x ) | ||
| 70 | self.dimensions = ( x, y, width, height ) | ||
| 71 | """ | ||
| 72 | if curses.has_colors(): | ||
| 73 | color = 1 | ||
| 74 | curses.init_pair( color, fg, bg ) | ||
| 75 | self.win.bkgdset( ord(' '), curses.color_pair(color) ) | ||
| 76 | else: | ||
| 77 | self.win.bkgdset( ord(' '), curses.A_BOLD ) | ||
| 78 | """ | ||
| 79 | self.erase() | ||
| 80 | self.setScrolling() | ||
| 81 | self.win.noutrefresh() | ||
| 82 | |||
| 83 | def erase( self ): | ||
| 84 | self.win.erase() | ||
| 85 | |||
| 86 | def setScrolling( self, b = True ): | ||
| 87 | self.win.scrollok( b ) | ||
| 88 | self.win.idlok( b ) | ||
| 89 | |||
| 90 | def setBoxed( self ): | ||
| 91 | self.boxed = True | ||
| 92 | self.win.box() | ||
| 93 | self.win.noutrefresh() | ||
| 94 | |||
| 95 | def setText( self, x, y, text, *args ): | ||
| 96 | self.win.addstr( y, x, text, *args ) | ||
| 97 | self.win.noutrefresh() | ||
| 98 | |||
| 99 | def appendText( self, text, *args ): | ||
| 100 | self.win.addstr( text, *args ) | ||
| 101 | self.win.noutrefresh() | ||
| 102 | |||
| 103 | def drawHline( self, y ): | ||
| 104 | self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] ) | ||
| 105 | self.win.noutrefresh() | ||
| 106 | |||
| 107 | class DecoratedWindow( Window ): | ||
| 108 | """Base class for windows with a box and a title bar""" | ||
| 109 | def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): | ||
| 110 | NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg ) | ||
| 111 | self.decoration = NCursesUI.Window( x, y, width, height, fg, bg ) | ||
| 112 | self.decoration.setBoxed() | ||
| 113 | self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) | ||
| 114 | self.setTitle( title ) | ||
| 115 | |||
| 116 | def setTitle( self, title ): | ||
| 117 | self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
| 118 | |||
| 119 | #-------------------------------------------------------------------------# | ||
| 120 | # class TitleWindow( Window ): | ||
| 121 | #-------------------------------------------------------------------------# | ||
| 122 | # """Title Window""" | ||
| 123 | # def __init__( self, x, y, width, height ): | ||
| 124 | # NCursesUI.Window.__init__( self, x, y, width, height ) | ||
| 125 | # version = bb.__version__ | ||
| 126 | # title = "BitBake %s" % version | ||
| 127 | # credit = "(C) 2003-2007 Team BitBake" | ||
| 128 | # #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) | ||
| 129 | # self.win.border() | ||
| 130 | # self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
| 131 | # self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
| 132 | |||
| 133 | #-------------------------------------------------------------------------# | ||
| 134 | class ThreadActivityWindow( DecoratedWindow ): | ||
| 135 | #-------------------------------------------------------------------------# | ||
| 136 | """Thread Activity Window""" | ||
| 137 | def __init__( self, x, y, width, height ): | ||
| 138 | NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height ) | ||
| 139 | |||
| 140 | def setStatus( self, thread, text ): | ||
| 141 | line = "%02d: %s" % ( thread, text ) | ||
| 142 | width = self.dimensions[WIDTH] | ||
| 143 | if ( len(line) > width ): | ||
| 144 | line = line[:width-3] + "..." | ||
| 145 | else: | ||
| 146 | line = line.ljust( width ) | ||
| 147 | self.setText( 0, thread, line ) | ||
| 148 | |||
| 149 | #-------------------------------------------------------------------------# | ||
| 150 | class MainWindow( DecoratedWindow ): | ||
| 151 | #-------------------------------------------------------------------------# | ||
| 152 | """Main Window""" | ||
| 153 | def __init__( self, x, y, width, height ): | ||
| 154 | self.StatusPosition = width - MAXSTATUSLENGTH | ||
| 155 | NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height ) | ||
| 156 | curses.nl() | ||
| 157 | |||
| 158 | def setTitle( self, title ): | ||
| 159 | title = "BitBake %s" % bb.__version__ | ||
| 160 | self.decoration.setText( 2, 1, title, curses.A_BOLD ) | ||
| 161 | self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD ) | ||
| 162 | |||
| 163 | def setStatus(self, status): | ||
| 164 | while len(status) < MAXSTATUSLENGTH: | ||
| 165 | status = status + " " | ||
| 166 | self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD ) | ||
| 167 | |||
| 168 | |||
| 169 | #-------------------------------------------------------------------------# | ||
| 170 | class ShellOutputWindow( DecoratedWindow ): | ||
| 171 | #-------------------------------------------------------------------------# | ||
| 172 | """Interactive Command Line Output""" | ||
| 173 | def __init__( self, x, y, width, height ): | ||
| 174 | NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height ) | ||
| 175 | |||
| 176 | #-------------------------------------------------------------------------# | ||
| 177 | class ShellInputWindow( Window ): | ||
| 178 | #-------------------------------------------------------------------------# | ||
| 179 | """Interactive Command Line Input""" | ||
| 180 | def __init__( self, x, y, width, height ): | ||
| 181 | NCursesUI.Window.__init__( self, x, y, width, height ) | ||
| 182 | |||
| 183 | # self.textbox = Textbox( self.win ) | ||
| 184 | # t = threading.Thread() | ||
| 185 | # t.run = self.textbox.edit | ||
| 186 | # t.start() | ||
| 187 | |||
| 188 | #-------------------------------------------------------------------------# | ||
| 189 | def main(self, stdscr, server, eventHandler): | ||
| 190 | #-------------------------------------------------------------------------# | ||
| 191 | height, width = stdscr.getmaxyx() | ||
| 192 | |||
| 193 | # for now split it like that: | ||
| 194 | # MAIN_y + THREAD_y = 2/3 screen at the top | ||
| 195 | # MAIN_x = 2/3 left, THREAD_y = 1/3 right | ||
| 196 | # CLI_y = 1/3 of screen at the bottom | ||
| 197 | # CLI_x = full | ||
| 198 | |||
| 199 | main_left = 0 | ||
| 200 | main_top = 0 | ||
| 201 | main_height = ( height / 3 * 2 ) | ||
| 202 | main_width = ( width / 3 ) * 2 | ||
| 203 | clo_left = main_left | ||
| 204 | clo_top = main_top + main_height | ||
| 205 | clo_height = height - main_height - main_top - 1 | ||
| 206 | clo_width = width | ||
| 207 | cli_left = main_left | ||
| 208 | cli_top = clo_top + clo_height | ||
| 209 | cli_height = 1 | ||
| 210 | cli_width = width | ||
| 211 | thread_left = main_left + main_width | ||
| 212 | thread_top = main_top | ||
| 213 | thread_height = main_height | ||
| 214 | thread_width = width - main_width | ||
| 215 | |||
| 216 | #tw = self.TitleWindow( 0, 0, width, main_top ) | ||
| 217 | mw = self.MainWindow( main_left, main_top, main_width, main_height ) | ||
| 218 | taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height ) | ||
| 219 | clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height ) | ||
| 220 | cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height ) | ||
| 221 | cli.setText( 0, 0, "BB>" ) | ||
| 222 | |||
| 223 | mw.setStatus("Idle") | ||
| 224 | |||
| 225 | helper = uihelper.BBUIHelper() | ||
| 226 | shutdown = 0 | ||
| 227 | |||
| 228 | try: | ||
| 229 | cmdline = server.runCommand(["getCmdLineAction"]) | ||
| 230 | if not cmdline: | ||
| 231 | return | ||
| 232 | ret = server.runCommand(cmdline) | ||
| 233 | if ret != True: | ||
| 234 | print "Couldn't get default commandlind! %s" % ret | ||
| 235 | return | ||
| 236 | except xmlrpclib.Fault, x: | ||
| 237 | print "XMLRPC Fault getting commandline:\n %s" % x | ||
| 238 | return | ||
| 239 | |||
| 240 | exitflag = False | ||
| 241 | while not exitflag: | ||
| 242 | try: | ||
| 243 | event = eventHandler.waitEvent(0.25) | ||
| 244 | if not event: | ||
| 245 | continue | ||
| 246 | helper.eventHandler(event) | ||
| 247 | #mw.appendText("%s\n" % event[0]) | ||
| 248 | if event[0].startswith('bb.event.Pkg'): | ||
| 249 | mw.appendText("NOTE: %s\n" % event[1]['_message']) | ||
| 250 | if event[0].startswith('bb.build.Task'): | ||
| 251 | mw.appendText("NOTE: %s\n" % event[1]['_message']) | ||
| 252 | if event[0].startswith('bb.msg.MsgDebug'): | ||
| 253 | mw.appendText('DEBUG: ' + event[1]['_message'] + '\n') | ||
| 254 | if event[0].startswith('bb.msg.MsgNote'): | ||
| 255 | mw.appendText('NOTE: ' + event[1]['_message'] + '\n') | ||
| 256 | if event[0].startswith('bb.msg.MsgWarn'): | ||
| 257 | mw.appendText('WARNING: ' + event[1]['_message'] + '\n') | ||
| 258 | if event[0].startswith('bb.msg.MsgError'): | ||
| 259 | mw.appendText('ERROR: ' + event[1]['_message'] + '\n') | ||
| 260 | if event[0].startswith('bb.msg.MsgFatal'): | ||
| 261 | mw.appendText('FATAL: ' + event[1]['_message'] + '\n') | ||
| 262 | if event[0].startswith('bb.event.ParseProgress'): | ||
| 263 | x = event[1]['sofar'] | ||
| 264 | y = event[1]['total'] | ||
| 265 | if x == y: | ||
| 266 | mw.setStatus("Idle") | ||
| 267 | mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked." | ||
| 268 | % ( event[1]['cached'], event[1]['parsed'], event[1]['skipped'], event[1]['masked'] )) | ||
| 269 | else: | ||
| 270 | mw.setStatus("Parsing: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) | ||
| 271 | # if event[0].startswith('bb.build.TaskFailed'): | ||
| 272 | # if event[1]['logfile']: | ||
| 273 | # if data.getVar("BBINCLUDELOGS", d): | ||
| 274 | # bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile) | ||
| 275 | # number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d) | ||
| 276 | # if number_of_lines: | ||
| 277 | # os.system('tail -n%s %s' % (number_of_lines, logfile)) | ||
| 278 | # else: | ||
| 279 | # f = open(logfile, "r") | ||
| 280 | # while True: | ||
| 281 | # l = f.readline() | ||
| 282 | # if l == '': | ||
| 283 | # break | ||
| 284 | # l = l.rstrip() | ||
| 285 | # print '| %s' % l | ||
| 286 | # f.close() | ||
| 287 | # else: | ||
| 288 | # bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile) | ||
| 289 | |||
| 290 | if event[0] == 'bb.command.CookerCommandCompleted': | ||
| 291 | exitflag = True | ||
| 292 | if event[0] == 'bb.command.CookerCommandFailed': | ||
| 293 | mw.appendText("Command execution failed: %s" % event[1]['error']) | ||
| 294 | time.sleep(2) | ||
| 295 | exitflag = True | ||
| 296 | if event[0] == 'bb.cooker.CookerExit': | ||
| 297 | exitflag = True | ||
| 298 | |||
| 299 | if helper.needUpdate: | ||
| 300 | activetasks, failedtasks = helper.getTasks() | ||
| 301 | taw.erase() | ||
| 302 | taw.setText(0, 0, "") | ||
| 303 | if activetasks: | ||
| 304 | taw.appendText("Active Tasks:\n") | ||
| 305 | for task in activetasks: | ||
| 306 | taw.appendText(task) | ||
| 307 | if failedtasks: | ||
| 308 | taw.appendText("Failed Tasks:\n") | ||
| 309 | for task in failedtasks: | ||
| 310 | taw.appendText(task) | ||
| 311 | |||
| 312 | curses.doupdate() | ||
| 313 | except KeyboardInterrupt: | ||
| 314 | if shutdown == 2: | ||
| 315 | mw.appendText("Third Keyboard Interrupt, exit.\n") | ||
| 316 | exitflag = True | ||
| 317 | if shutdown == 1: | ||
| 318 | mw.appendText("Second Keyboard Interrupt, stopping...\n") | ||
| 319 | server.runCommand(["stateStop"]) | ||
| 320 | if shutdown == 0: | ||
| 321 | mw.appendText("Keyboard Interrupt, closing down...\n") | ||
| 322 | server.runCommand(["stateShutdown"]) | ||
| 323 | shutdown = shutdown + 1 | ||
| 324 | pass | ||
| 325 | |||
def init(server, eventHandler):
    """Entry point: run the ncurses UI until the build ends or fails."""
    ui = NCursesUI()
    try:
        # curses.wrapper restores the terminal even if main() raises.
        curses.wrapper(ui.main, server, eventHandler)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # appears intentional so the traceback prints after curses cleanup.
    except:
        import traceback
        traceback.print_exc()
diff --git a/bitbake-dev/lib/bb/ui/uievent.py b/bitbake-dev/lib/bb/ui/uievent.py new file mode 100644 index 0000000000..9d724d7fc5 --- /dev/null +++ b/bitbake-dev/lib/bb/ui/uievent.py | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
| 5 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
| 6 | # | ||
| 7 | # This program is free software; you can redistribute it and/or modify | ||
| 8 | # it under the terms of the GNU General Public License version 2 as | ||
| 9 | # published by the Free Software Foundation. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, | ||
| 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | # GNU General Public License for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License along | ||
| 17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | |||
| 20 | |||
| 21 | """ | ||
| 22 | Use this class to fork off a thread to recieve event callbacks from the bitbake | ||
| 23 | server and queue them for the UI to process. This process must be used to avoid | ||
| 24 | client/server deadlocks. | ||
| 25 | """ | ||
| 26 | |||
| 27 | import sys, socket, threading | ||
| 28 | from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler | ||
| 29 | |||
class BBUIEventQueue:
    """Thread-safe FIFO of events pushed by the BitBake server.

    A daemon thread runs a small XML-RPC server which the BitBake server
    calls back into; the UI drains queued events via waitEvent()/getEvent().
    """

    def __init__(self, BBServer):
        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.BBServer = BBServer

        # Daemon thread so a wedged callback server never keeps the
        # process alive.
        self.t = threading.Thread(target=self.startCallbackHandler)
        self.t.setDaemon(True)
        self.t.start()

    def getEvent(self):
        """Pop and return the oldest queued event, or None when empty."""
        self.eventQueueLock.acquire()
        try:
            if not self.eventQueue:
                return None
            event = self.eventQueue.pop(0)
            if not self.eventQueue:
                # Queue drained: make waitEvent() block again.
                self.eventQueueNotify.clear()
            return event
        finally:
            self.eventQueueLock.release()

    def waitEvent(self, delay):
        """Wait up to 'delay' seconds for an event, then try to pop one."""
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        """Append an event; called from the XML-RPC handler thread."""
        self.eventQueueLock.acquire()
        try:
            self.eventQueue.append(event)
            self.eventQueueNotify.set()
        finally:
            self.eventQueueLock.release()

    def startCallbackHandler(self):
        """Serve XML-RPC callbacks from the BitBake server until quit."""
        srv = UIXMLRPCServer()
        self.host, self.port = srv.socket.getsockname()

        srv.register_function( self.system_quit, "event.quit" )
        srv.register_function( self.queue_event, "event.send" )
        srv.socket.settimeout(1)

        self.EventHandle = self.BBServer.registerEventHandler(self.host, self.port)

        self.server = srv
        while not srv.quit:
            srv.handle_request()
        srv.server_close()

    def system_quit( self ):
        """
        Shut down the callback thread
        """
        # Best-effort: the server may already be gone at shutdown time.
        try:
            self.BBServer.unregisterEventHandler(self.EventHandle)
        except:
            pass
        self.server.quit = True
| 97 | |||
class UIXMLRPCServer (SimpleXMLRPCServer):
    """XML-RPC server with a 'quit' flag and a timeout-tolerant accept loop,
    so the serving thread can notice shutdown within about a second."""

    def __init__( self, interface = ("localhost", 0) ):
        self.quit = False
        SimpleXMLRPCServer.__init__( self,
                                     interface,
                                     requestHandler=SimpleXMLRPCRequestHandler,
                                     logRequests=False, allow_none=True)

    def get_request(self):
        # Retry accept() on timeout until a client connects or quit is set.
        while not self.quit:
            try:
                sock, addr = self.socket.accept()
            except socket.timeout:
                continue
            sock.settimeout(1)
            return (sock, addr)
        return (None, None)

    def close_request(self, request):
        # A None request means get_request() gave up because we are quitting.
        if request is not None:
            SimpleXMLRPCServer.close_request(self, request)

    def process_request(self, request, client_address):
        if request is not None:
            SimpleXMLRPCServer.process_request(self, request, client_address)
| 126 | |||
| 127 | |||
diff --git a/bitbake-dev/lib/bb/ui/uihelper.py b/bitbake-dev/lib/bb/ui/uihelper.py new file mode 100644 index 0000000000..246844c9d2 --- /dev/null +++ b/bitbake-dev/lib/bb/ui/uihelper.py | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | # | ||
| 4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
| 5 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
| 6 | # | ||
| 7 | # This program is free software; you can redistribute it and/or modify | ||
| 8 | # it under the terms of the GNU General Public License version 2 as | ||
| 9 | # published by the Free Software Foundation. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, | ||
| 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | # GNU General Public License for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License along | ||
| 17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | |||
class BBUIHelper:
    """Tracks running and failed tasks from bb.build.Task* events so a UI
    can render a task-activity display."""

    def __init__(self):
        # Flag for UIs to poll: set whenever the task tables change.
        self.needUpdate = False
        # Both dicts are keyed by a "<package> <task>\n" display label.
        self.running_tasks = {}
        self.failed_tasks = {}

    def eventHandler(self, event):
        """Update the task tables from a (name, data) event tuple."""
        name, data = event[0], event[1]
        if name.startswith('bb.build.TaskStarted'):
            label = "%s %s\n" % (data['_package'], data['_task'])
            self.running_tasks[label] = ""
            self.needUpdate = True
        elif name.startswith('bb.build.TaskSucceeded'):
            label = "%s %s\n" % (data['_package'], data['_task'])
            del self.running_tasks[label]
            self.needUpdate = True
        elif name.startswith('bb.build.TaskFailed'):
            label = "%s %s\n" % (data['_package'], data['_task'])
            del self.running_tasks[label]
            self.failed_tasks[label] = ""
            self.needUpdate = True

        # Add runqueue event handling
        #if event[0].startswith('bb.runqueue.runQueueTaskCompleted'):
        #    a = 1
        #if event[0].startswith('bb.runqueue.runQueueTaskStarted'):
        #    a = 1
        #if event[0].startswith('bb.runqueue.runQueueTaskFailed'):
        #    a = 1
        #if event[0].startswith('bb.runqueue.runQueueExitWait'):
        #    a = 1

    def getTasks(self):
        """Return the (running_tasks, failed_tasks) dicts."""
        return (self.running_tasks, self.failed_tasks)
diff --git a/bitbake-dev/lib/bb/utils.py b/bitbake-dev/lib/bb/utils.py new file mode 100644 index 0000000000..17e22e389e --- /dev/null +++ b/bitbake-dev/lib/bb/utils.py | |||
| @@ -0,0 +1,270 @@ | |||
| 1 | # ex:ts=4:sw=4:sts=4:et | ||
| 2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
| 3 | """ | ||
| 4 | BitBake Utility Functions | ||
| 5 | """ | ||
| 6 | |||
| 7 | # Copyright (C) 2004 Michael Lauer | ||
| 8 | # | ||
| 9 | # This program is free software; you can redistribute it and/or modify | ||
| 10 | # it under the terms of the GNU General Public License version 2 as | ||
| 11 | # published by the Free Software Foundation. | ||
| 12 | # | ||
| 13 | # This program is distributed in the hope that it will be useful, | ||
| 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | # GNU General Public License for more details. | ||
| 17 | # | ||
| 18 | # You should have received a copy of the GNU General Public License along | ||
| 19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 21 | |||
| 22 | digits = "0123456789" | ||
| 23 | ascii_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" | ||
| 24 | |||
| 25 | import re, fcntl, os | ||
| 26 | |||
def explode_version(s):
    """
    Split a version string into a list of comparable chunks: runs of
    digits become ints, runs of ASCII letters stay strings, and any
    other characters (separators such as '.', '-' or '_') are dropped.
    """
    numeric_re = re.compile('^(\d+)(.*)$')
    alpha_re = re.compile('^([a-zA-Z]+)(.*)$')
    chunks = []
    while s:
        m = numeric_re.match(s)
        if m:
            chunks.append(int(m.group(1)))
            s = m.group(2)
            continue
        m = alpha_re.match(s)
        if m:
            chunks.append(m.group(1))
            s = m.group(2)
            continue
        # Neither digits nor letters: treat as a separator and skip it.
        s = s[1:]
    return chunks
| 44 | |||
def vercmp_part(a, b):
    """
    Compare two version strings chunk-wise using explode_version().
    Returns 1 if a > b, -1 if a < b and 0 when they are equal.
    """
    va = explode_version(a)
    vb = explode_version(b)
    while True:
        # An exhausted side yields None; under Python 2 ordering None
        # compares below any int or string, so "1.2" < "1.2.1".
        if va:
            ca = va.pop(0)
        else:
            ca = None
        if vb:
            cb = vb.pop(0)
        else:
            cb = None
        if ca is None and cb is None:
            return 0
        if ca > cb:
            return 1
        if ca < cb:
            return -1
| 63 | |||
def vercmp(ta, tb):
    """
    Compare two (epoch, version, revision) triples.

    The epoch difference dominates; on a tie the version strings are
    compared, then the revision strings.  Returns a negative, zero or
    positive number following the usual cmp() convention.
    """
    (ea, va, ra) = ta
    (eb, vb, rb) = tb

    r = int(ea) - int(eb)
    if r:
        return r
    r = vercmp_part(va, vb)
    if r:
        return r
    return vercmp_part(ra, rb)
| 74 | |||
def explode_deps(s):
    """
    Parse an RDEPENDS-style string of the form
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return the list of dependency names.  Parenthesised version
    constraints are skipped entirely.
    """
    deps = []
    in_version = False
    for token in s.split():
        if token.startswith('('):
            # Start of a "(op version)" constraint: suppress tokens
            # until the closing parenthesis.
            in_version = True
        if not in_version:
            deps.append(token)
        if in_version and token.endswith(')'):
            in_version = False
    return deps
| 98 | |||
| 99 | |||
| 100 | |||
def _print_trace(body, line):
    """
    Print (via bb.msg.error) the lines of *body* -- a list of source
    lines -- surrounding the 1-based line number *line*, to give
    context for a compile or execution failure.
    """
    import bb

    # Show up to four lines on either side of the failing line,
    # clamped to the bounds of the body.
    bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
    first = max(1, line - 4)
    last = min(line + 4, len(body) - 1)
    for lineno in range(first, last + 1):
        bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (lineno, body[lineno - 1]))
| 113 | |||
| 114 | |||
def better_compile(text, file, realfile):
    """
    A better compile method: compile *text* as an exec-mode code object
    under the filename *file*, and on failure report the offending
    lines (from *realfile*, the file the code originally came from)
    via bb.msg before exiting.
    """
    try:
        return compile(text, file, "exec")
    except Exception, e:
        import bb,sys

        # split the text into lines again
        body = text.split('\n')
        # NOTE(review): realfile is passed as an extra positional argument
        # instead of being formatted into the message string -- verify that
        # bb.msg.error accepts a third argument; otherwise this should be
        # "Error in compiling python function in: %s" % realfile.
        bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile)
        bb.msg.error(bb.msg.domain.Util, "The lines resulting into this error were:")
        # e.lineno is 1-based (SyntaxError convention); body is 0-based.
        bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))

        _print_trace(body, e.lineno)

        # exit now
        sys.exit(1)
| 135 | |||
def better_exec(code, context, text, realfile):
    """
    Similar to better_compile: exec the code object *code* in the
    namespace *context*, and on failure print the source lines (from
    *text*, the original source string) responsible for the error
    before re-raising.
    """
    import bb,sys
    try:
        exec code in context
    except:
        # Bare except is deliberate: every failure is reported with
        # context and then re-raised below.
        (t,value,tb) = sys.exc_info()

        # Control-flow exceptions pass through untouched.
        if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
            raise

        # print the Header of the Error Message
        # NOTE(review): realfile is passed as an extra positional argument
        # rather than formatted into the message -- confirm bb.msg.error
        # accepts a third argument.
        bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: ", realfile)
        bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) )

        # Walk to the innermost frame: that is where the user code failed.
        while tb.tb_next:
            tb = tb.tb_next

        import traceback
        line = traceback.tb_lineno(tb)

        _print_trace( text.split('\n'), line )

        raise
| 165 | |||
def Enum(*names):
    """
    A simple class to give Enum support (Python 2).

    Usage: Weekday = Enum('mon', 'tue', ...) returns a singleton whose
    attributes (Weekday.mon, ...) are ordered, hashable enum values.
    The instance is iterable and indexable over its values.
    """

    assert names, "Empty enums are not supported"

    # Both nested classes close over `constants`, `maximum` and
    # `EnumType`, which are only assigned after the class bodies below.
    class EnumClass(object):
        __slots__ = names
        def __iter__(self): return iter(constants)
        def __len__(self): return len(constants)
        def __getitem__(self, i): return constants[i]
        def __repr__(self): return 'Enum' + str(names)
        def __str__(self): return 'enum ' + str(constants)

    class EnumValue(object):
        # A plain string is valid for __slots__; it declares the single
        # (name-mangled) attribute _EnumValue__value.
        __slots__ = ('__value')
        def __init__(self, value): self.__value = value
        Value = property(lambda self: self.__value)
        EnumType = property(lambda self: EnumType)
        def __hash__(self): return hash(self.__value)
        # __cmp__ and cmp() are Python-2-only; ordering follows the
        # declaration order of the names.
        def __cmp__(self, other):
            # C fans might want to remove the following assertion
            # to make all enums comparable by ordinal value {;))
            assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
            return cmp(self.__value, other.__value)
        # ~value mirrors the ordinal: first <-> last, and so on.
        def __invert__(self): return constants[maximum - self.__value]
        # Python-2 truth protocol: the 0th value is falsy.
        def __nonzero__(self): return bool(self.__value)
        def __repr__(self): return str(names[self.__value])

    maximum = len(names) - 1
    constants = [None] * len(names)
    for i, each in enumerate(names):
        val = EnumValue(i)
        setattr(EnumClass, each, val)
        constants[i] = val
    constants = tuple(constants)
    EnumType = EnumClass()
    return EnumType
| 205 | |||
def lockfile(name):
    """
    Use the file *name* as a lock file; block until the lock has been
    acquired.  Returns an open file object to pass to unlockfile().
    """
    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        lf = open(name, "a+")
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
        statinfo = os.fstat(lf.fileno())
        if os.path.exists(lf.name):
            statinfo2 = os.stat(lf.name)
            if statinfo.st_ino == statinfo2.st_ino:
                return lf
        # File no longer exists or changed; close the now-useless
        # descriptor and retry.  (Bug fix: this previously read
        # `lf.close` -- referencing the method without calling it --
        # leaking one file descriptor per retry.)
        lf.close()
| 231 | |||
def unlockfile(lf):
    """
    Unlock a file locked using lockfile().

    The lock file is removed first so that any waiter that subsequently
    acquires the flock sees the inode mismatch and retries on a fresh
    file (see the check in lockfile()).
    """
    os.unlink(lf.name)
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    # Bug fix: this previously read `lf.close` (no call), so the
    # descriptor was never actually closed.
    lf.close()
| 239 | |||
def md5_file(filename):
    """
    Return the hex string representation of the MD5 checksum of filename.
    """
    try:
        import hashlib
        m = hashlib.md5()
    except ImportError:
        # Python 2.4 has no hashlib; fall back to the old md5 module.
        import md5
        m = md5.new()

    # Read in binary mode so the digest matches the exact on-disk bytes
    # (text mode could translate newlines on some platforms), and in
    # fixed-size chunks so large files don't need line splitting or a
    # full in-memory read.
    f = open(filename, "rb")
    try:
        data = f.read(8192)
        while data:
            m.update(data)
            data = f.read(8192)
    finally:
        f.close()
    return m.hexdigest()
| 254 | |||
def sha256_file(filename):
    """
    Return the hex string representation of the 256-bit SHA checksum of
    filename. On Python 2.4 this will return None, so callers will need to
    handle that by either skipping SHA checks, or running a standalone sha256sum
    binary.
    """
    try:
        import hashlib
    except ImportError:
        return None

    s = hashlib.sha256()
    # Binary, chunked read: digest the exact on-disk bytes and keep
    # memory use bounded for large files (the previous line-by-line
    # text-mode read could alter newline bytes on some platforms).
    f = open(filename, "rb")
    try:
        data = f.read(8192)
        while data:
            s.update(data)
            data = f.read(8192)
    finally:
        f.close()
    return s.hexdigest()
diff --git a/bitbake-dev/lib/bb/xmlrpcserver.py b/bitbake-dev/lib/bb/xmlrpcserver.py new file mode 100644 index 0000000000..075eda0573 --- /dev/null +++ b/bitbake-dev/lib/bb/xmlrpcserver.py | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | # | ||
| 2 | # BitBake XMLRPC Server | ||
| 3 | # | ||
| 4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
| 5 | # Copyright (C) 2006 - 2008 Richard Purdie | ||
| 6 | # | ||
| 7 | # This program is free software; you can redistribute it and/or modify | ||
| 8 | # it under the terms of the GNU General Public License version 2 as | ||
| 9 | # published by the Free Software Foundation. | ||
| 10 | # | ||
| 11 | # This program is distributed in the hope that it will be useful, | ||
| 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | # GNU General Public License for more details. | ||
| 15 | # | ||
| 16 | # You should have received a copy of the GNU General Public License along | ||
| 17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | |||
| 20 | """ | ||
| 21 | This module implements an xmlrpc server for BitBake. | ||
| 22 | |||
| 23 | Use this by deriving a class from BitBakeXMLRPCServer and then adding | ||
| 24 | methods which you want to "export" via XMLRPC. If the methods have the | ||
| 25 | prefix xmlrpc_, then registering those function will happen automatically, | ||
| 26 | if not, you need to call register_function. | ||
| 27 | |||
| 28 | Use register_idle_function() to add a function which the xmlrpc server | ||
| 29 | calls from within serve_forever when no requests are pending. Make sure | ||
| 30 | that those functions are non-blocking or else you will introduce latency | ||
| 31 | in the server's main loop. | ||
| 32 | """ | ||
| 33 | |||
| 34 | import bb | ||
| 35 | import xmlrpclib | ||
| 36 | |||
| 37 | DEBUG = False | ||
| 38 | |||
| 39 | from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler | ||
| 40 | import os, sys, inspect, select | ||
| 41 | |||
| 42 | class BitBakeServerCommands(): | ||
| 43 | def __init__(self, server, cooker): | ||
| 44 | self.cooker = cooker | ||
| 45 | self.server = server | ||
| 46 | |||
| 47 | def registerEventHandler(self, host, port): | ||
| 48 | """ | ||
| 49 | Register a remote UI Event Handler | ||
| 50 | """ | ||
| 51 | s = xmlrpclib.Server("http://%s:%d" % (host, port), allow_none=True) | ||
| 52 | return bb.event.register_UIHhandler(s) | ||
| 53 | |||
| 54 | def unregisterEventHandler(self, handlerNum): | ||
| 55 | """ | ||
| 56 | Unregister a remote UI Event Handler | ||
| 57 | """ | ||
| 58 | return bb.event.unregister_UIHhandler(handlerNum) | ||
| 59 | |||
| 60 | def runCommand(self, command): | ||
| 61 | """ | ||
| 62 | Run a cooker command on the server | ||
| 63 | """ | ||
| 64 | return self.cooker.command.runCommand(command) | ||
| 65 | |||
| 66 | def terminateServer(self): | ||
| 67 | """ | ||
| 68 | Trigger the server to quit | ||
| 69 | """ | ||
| 70 | self.server.quit = True | ||
| 71 | print "Server (cooker) exitting" | ||
| 72 | return | ||
| 73 | |||
| 74 | def ping(self): | ||
| 75 | """ | ||
| 76 | Dummy method which can be used to check the server is still alive | ||
| 77 | """ | ||
| 78 | return True | ||
| 79 | |||
class BitBakeXMLRPCServer(SimpleXMLRPCServer):
    """
    XMLRPC server for BitBake: exports the methods of a
    BitBakeServerCommands instance and interleaves registered idle
    callbacks with request handling (see get_request).
    """
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, cooker, interface = ("localhost", 0)):
        """
        Constructor.

        cooker:    cooker object whose commands are exported via
                   BitBakeServerCommands
        interface: (host, port) to bind to; port 0 lets the kernel pick
                   a free port -- the actual address is recorded in
                   self.host / self.port
        """
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=SimpleXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        # Idle callbacks: maps function -> opaque data handed back to it.
        self._idlefuns = {}
        self.host, self.port = self.socket.getsockname()
        #self.register_introspection_functions()
        commands = BitBakeServerCommands(self, cooker)
        # Empty prefix: export every method of the commands object.
        self.autoregister_all_functions(commands, "")

    def autoregister_all_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                # Export under the name with the prefix stripped.
                self.register_function(method, name[len(prefix):])

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert callable(function)
        self._idlefuns[function] = data

    def serve_forever(self):
        """
        Serve Requests. Overloaded to honor a quit command
        """
        self.quit = False
        while not self.quit:
            self.handle_request()

        # Tell idle functions we're exiting
        for function, data in self._idlefuns.items():
            try:
                # Final call with third argument True; presumably this
                # signals shutdown to the callback -- confirm against the
                # registered idle functions.
                retval = function(self, data, True)
            except:
                # Best effort on shutdown: a failing idle function must
                # not prevent the server from closing down.
                pass

        self.server_close()
        return

    def get_request(self):
        """
        Get next request. Behaves like the parent class unless a waitpid callback
        has been set. In that case, we regularly check waitpid when the server is idle
        """
        while True:
            # wait 500 ms for an xmlrpc request
            if DEBUG:
                print "DEBUG: select'ing 500ms waiting for an xmlrpc request..."
            ifds, ofds, xfds = select.select([self.socket.fileno()], [], [], 0.5)
            if ifds:
                return self.socket.accept()
            # call idle functions only if we're not shutting down atm to prevent a recursion
            if not self.quit:
                if DEBUG:
                    print "DEBUG: server is idle -- calling idle functions..."
                # NOTE: items() returns a list on Python 2, so deleting
                # entries from self._idlefuns while iterating is safe here.
                for function, data in self._idlefuns.items():
                    try:
                        retval = function(self, data, False)
                        # A false return value unregisters the callback.
                        if not retval:
                            del self._idlefuns[function]
                    except SystemExit:
                        raise
                    except:
                        # Report and keep going: one broken idle function
                        # shouldn't kill the server loop.
                        import traceback
                        traceback.print_exc()
                        pass
