diff options
Diffstat (limited to 'scripts/lib/mic/utils')
| -rw-r--r-- | scripts/lib/mic/utils/BmapCreate.py | 298 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/Fiemap.py | 252 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/__init__.py | 0 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/cmdln.py | 1586 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/errors.py | 71 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/fs_related.py | 1029 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/gpt_parser.py | 331 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/grabber.py | 97 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/misc.py | 1067 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/partitionedfs.py | 790 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/proxy.py | 183 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/rpmmisc.py | 600 | ||||
| -rw-r--r-- | scripts/lib/mic/utils/runner.py | 109 |
13 files changed, 6413 insertions, 0 deletions
diff --git a/scripts/lib/mic/utils/BmapCreate.py b/scripts/lib/mic/utils/BmapCreate.py new file mode 100644 index 0000000000..65b19a5f46 --- /dev/null +++ b/scripts/lib/mic/utils/BmapCreate.py | |||
| @@ -0,0 +1,298 @@ | |||
| 1 | """ This module implements the block map (bmap) creation functionality and | ||
| 2 | provides the corresponding API in form of the 'BmapCreate' class. | ||
| 3 | |||
The idea is that while image files may generally be very large (e.g., 4GiB),
| 5 | they may nevertheless contain only little real data, e.g., 512MiB. This data | ||
| 6 | are files, directories, file-system meta-data, partition table, etc. When | ||
| 7 | copying the image to the target device, you do not have to copy all the 4GiB of | ||
| 8 | data, you can copy only 512MiB of it, which is 4 times less, so copying should | ||
| 9 | presumably be 4 times faster. | ||
| 10 | |||
| 11 | The block map file is an XML file which contains a list of blocks which have to | ||
| 12 | be copied to the target device. The other blocks are not used and there is no | ||
| 13 | need to copy them. The XML file also contains some additional information like | ||
| 14 | block size, image size, count of mapped blocks, etc. There are also many | ||
| 15 | commentaries, so it is human-readable. | ||
| 16 | |||
| 17 | The image has to be a sparse file. Generally, this means that when you generate | ||
| 18 | this image file, you should start with a huge sparse file which contains a | ||
| 19 | single hole spanning the entire file. Then you should partition it, write all | ||
| 20 | the data (probably by means of loop-back mounting the image or parts of it), | ||
| 21 | etc. The end result should be a sparse file where mapped areas represent useful | ||
| 22 | parts of the image and holes represent useless parts of the image, which do not | ||
| 23 | have to be copied when copying the image to the target device. | ||
| 24 | |||
This module uses the FIEMAP ioctl to detect holes. """
| 26 | |||
| 27 | # Disable the following pylint recommendations: | ||
| 28 | # * Too many instance attributes - R0902 | ||
| 29 | # * Too few public methods - R0903 | ||
| 30 | # pylint: disable=R0902,R0903 | ||
| 31 | |||
| 32 | import hashlib | ||
| 33 | from mic.utils.misc import human_size | ||
| 34 | from mic.utils import Fiemap | ||
| 35 | |||
| 36 | # The bmap format version we generate | ||
| 37 | SUPPORTED_BMAP_VERSION = "1.3" | ||
| 38 | |||
| 39 | _BMAP_START_TEMPLATE = \ | ||
| 40 | """<?xml version="1.0" ?> | ||
| 41 | <!-- This file contains the block map for an image file, which is basically | ||
| 42 | a list of useful (mapped) block numbers in the image file. In other words, | ||
| 43 | it lists only those blocks which contain data (boot sector, partition | ||
| 44 | table, file-system metadata, files, directories, extents, etc). These | ||
| 45 | blocks have to be copied to the target device. The other blocks do not | ||
| 46 | contain any useful data and do not have to be copied to the target | ||
| 47 | device. | ||
| 48 | |||
| 49 | The block map an optimization which allows to copy or flash the image to | ||
| 50 | the image quicker than copying of flashing the entire image. This is | ||
| 51 | because with bmap less data is copied: <MappedBlocksCount> blocks instead | ||
| 52 | of <BlocksCount> blocks. | ||
| 53 | |||
| 54 | Besides the machine-readable data, this file contains useful commentaries | ||
| 55 | which contain human-readable information like image size, percentage of | ||
| 56 | mapped data, etc. | ||
| 57 | |||
| 58 | The 'version' attribute is the block map file format version in the | ||
| 59 | 'major.minor' format. The version major number is increased whenever an | ||
| 60 | incompatible block map format change is made. The minor number changes | ||
| 61 | in case of minor backward-compatible changes. --> | ||
| 62 | |||
| 63 | <bmap version="%s"> | ||
| 64 | <!-- Image size in bytes: %s --> | ||
| 65 | <ImageSize> %u </ImageSize> | ||
| 66 | |||
| 67 | <!-- Size of a block in bytes --> | ||
| 68 | <BlockSize> %u </BlockSize> | ||
| 69 | |||
| 70 | <!-- Count of blocks in the image file --> | ||
| 71 | <BlocksCount> %u </BlocksCount> | ||
| 72 | |||
| 73 | """ | ||
| 74 | |||
class Error(Exception):
    """ The single exception type raised by this module.  Instances carry a
    human-readable description of the problem that occurred. """
class BmapCreate:
    """ This class implements the bmap creation functionality. To generate a
    bmap for an image (which is supposedly a sparse file), you should first
    create an instance of 'BmapCreate' and provide:

    * full path or a file-like object of the image to create bmap for
    * full path or a file object to use for writing the results to

    Then you should invoke the 'generate()' method of this class. It will use
    the FIEMAP ioctl to generate the bmap. """

    def _open_image_file(self):
        """ Open the image file in binary mode and remember that this object
        owns the file handle (so '__del__()' closes it). """

        try:
            self._f_image = open(self._image_path, 'rb')
        except IOError as err:
            raise Error("cannot open image file '%s': %s" \
                        % (self._image_path, err))

        self._f_image_needs_close = True

    def _open_bmap_file(self):
        """ Open the bmap file in 'w+' mode - '_bmap_file_end()' re-reads the
        whole file in order to calculate its SHA1 checksum. """

        try:
            self._f_bmap = open(self._bmap_path, 'w+')
        except IOError as err:
            raise Error("cannot open bmap file '%s': %s" \
                        % (self._bmap_path, err))

        self._f_bmap_needs_close = True

    def __init__(self, image, bmap):
        """ Initialize a class instance:
        * image - full path or a file-like object of the image to create bmap
          for
        * bmap - full path or a file object to use for writing the resulting
          bmap to

        Raises 'Error' if either file cannot be opened or if the image has
        zero size. """

        # Public statistics, filled in by the constructor and 'generate()'
        # (sizes in bytes, counts in blocks)
        self.image_size = None
        self.image_size_human = None
        self.block_size = None
        self.blocks_cnt = None
        self.mapped_cnt = None
        self.mapped_size = None
        self.mapped_size_human = None
        self.mapped_percent = None

        # File offsets of the place-holders written by '_bmap_file_start()';
        # '_bmap_file_end()' seeks back to them and patches in real values
        self._mapped_count_pos1 = None
        self._mapped_count_pos2 = None
        self._sha1_pos = None

        self._f_image_needs_close = False
        self._f_bmap_needs_close = False

        # Accept either a path or an already-open file-like object
        if hasattr(image, "read"):
            self._f_image = image
            self._image_path = image.name
        else:
            self._image_path = image
            self._open_image_file()

        if hasattr(bmap, "read"):
            self._f_bmap = bmap
            self._bmap_path = bmap.name
        else:
            self._bmap_path = bmap
            self._open_bmap_file()

        self.fiemap = Fiemap.Fiemap(self._f_image)

        self.image_size = self.fiemap.image_size
        self.image_size_human = human_size(self.image_size)
        if self.image_size == 0:
            raise Error("cannot generate bmap for zero-sized image file '%s'" \
                        % self._image_path)

        self.block_size = self.fiemap.block_size
        self.blocks_cnt = self.fiemap.blocks_cnt

    def _bmap_file_start(self):
        """ A helper function which generates the starting contents of the
        block map file: the header comment, image size, block size, etc.

        The mapped-blocks statistics and the file SHA1 are not known yet, so
        fixed-width white-space place-holders are written instead; their file
        offsets are remembered so '_bmap_file_end()' can overwrite them. """

        # We do not know the amount of mapped blocks at the moment, so just put
        # whitespaces instead of real numbers. Assume the longest possible
        # numbers.
        # NOTE(review): this assumes the human-readable mapped size string is
        # never longer than the human-readable image size string - confirm
        # 'human_size()' guarantees this, otherwise the in-place overwrite in
        # '_bmap_file_end()' could clobber the text that follows.
        mapped_count = ' ' * len(str(self.image_size))
        mapped_size_human = ' ' * len(self.image_size_human)

        xml = _BMAP_START_TEMPLATE \
               % (SUPPORTED_BMAP_VERSION, self.image_size_human,
                  self.image_size, self.block_size, self.blocks_cnt)
        xml += "    <!-- Count of mapped blocks: "

        self._f_bmap.write(xml)
        self._mapped_count_pos1 = self._f_bmap.tell()

        # Just put white-spaces instead of real information about mapped blocks
        xml = "%s or %.1f -->\n" % (mapped_size_human, 100.0)
        xml += "    <MappedBlocksCount> "

        self._f_bmap.write(xml)
        self._mapped_count_pos2 = self._f_bmap.tell()

        xml = "%s </MappedBlocksCount>\n\n" % mapped_count

        # pylint: disable=C0301
        xml += "    <!-- The checksum of this bmap file. When it is calculated, the value of\n"
        xml += "         the SHA1 checksum has to be zero (40 ASCII \"0\" symbols). -->\n"
        xml += "    <BmapFileSHA1> "

        self._f_bmap.write(xml)
        self._sha1_pos = self._f_bmap.tell()

        xml = "0" * 40 + " </BmapFileSHA1>\n\n"
        xml += "    <!-- The block map which consists of elements which may either be a\n"
        xml += "         range of blocks or a single block. The 'sha1' attribute (if present)\n"
        xml += "         is the SHA1 checksum of this blocks range. -->\n"
        xml += "    <BlockMap>\n"
        # pylint: enable=C0301

        self._f_bmap.write(xml)

    def _bmap_file_end(self):
        """ A helper function which generates the final parts of the block map
        file: the ending tags and the information about the amount of mapped
        blocks.

        It seeks back to the place-holders written by '_bmap_file_start()',
        patches in the real statistics, then computes the SHA1 of the whole
        bmap file (while the checksum field still holds 40 "0" characters)
        and patches that in as well. """

        xml = "    </BlockMap>\n"
        xml += "</bmap>\n"

        self._f_bmap.write(xml)

        self._f_bmap.seek(self._mapped_count_pos1)
        self._f_bmap.write("%s or %.1f%%" % \
                           (self.mapped_size_human, self.mapped_percent))

        self._f_bmap.seek(self._mapped_count_pos2)
        self._f_bmap.write("%u" % self.mapped_cnt)

        # Checksum the entire file with the SHA1 field still zeroed
        self._f_bmap.seek(0)
        sha1 = hashlib.sha1(self._f_bmap.read()).hexdigest()
        self._f_bmap.seek(self._sha1_pos)
        self._f_bmap.write("%s" % sha1)

    def _calculate_sha1(self, first, last):
        """ A helper function which calculates SHA1 checksum for the range of
        blocks of the image file: from block 'first' to block 'last'
        (inclusive), reading in 1MiB chunks. """

        start = first * self.block_size
        end = (last + 1) * self.block_size

        self._f_image.seek(start)
        hash_obj = hashlib.new("sha1")

        chunk_size = 1024*1024
        to_read = end - start
        read = 0

        while read < to_read:
            # Shrink the final chunk so we never read past 'end'
            if read + chunk_size > to_read:
                chunk_size = to_read - read
            chunk = self._f_image.read(chunk_size)
            hash_obj.update(chunk)
            read += chunk_size

        return hash_obj.hexdigest()

    def generate(self, include_checksums = True):
        """ Generate bmap for the image file. If 'include_checksums' is 'True',
        also generate SHA1 checksums for block ranges.

        Fills in the 'mapped_cnt', 'mapped_size', 'mapped_size_human' and
        'mapped_percent' attributes. Raises 'Error' if the bmap file cannot
        be flushed. """

        # Save image file position in order to restore it at the end
        image_pos = self._f_image.tell()

        self._bmap_file_start()

        # Generate the block map and write it to the XML block map
        # file as we go.
        self.mapped_cnt = 0
        for first, last in self.fiemap.get_mapped_ranges(0, self.blocks_cnt):
            self.mapped_cnt += last - first + 1
            if include_checksums:
                sha1 = self._calculate_sha1(first, last)
                sha1 = " sha1=\"%s\"" % sha1
            else:
                sha1 = ""

            # Single-block ranges are emitted without the "first-last" form
            if first != last:
                self._f_bmap.write("        <Range%s> %s-%s </Range>\n" \
                                   % (sha1, first, last))
            else:
                self._f_bmap.write("        <Range%s> %s </Range>\n" \
                                   % (sha1, first))

        self.mapped_size = self.mapped_cnt * self.block_size
        self.mapped_size_human = human_size(self.mapped_size)
        self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt

        self._bmap_file_end()

        try:
            self._f_bmap.flush()
        except IOError as err:
            raise Error("cannot flush the bmap file '%s': %s" \
                        % (self._bmap_path, err))

        self._f_image.seek(image_pos)

    def __del__(self):
        """ The class destructor which closes the opened files. """

        if self._f_image_needs_close:
            self._f_image.close()
        if self._f_bmap_needs_close:
            self._f_bmap.close()
diff --git a/scripts/lib/mic/utils/Fiemap.py b/scripts/lib/mic/utils/Fiemap.py new file mode 100644 index 0000000000..f2db6ff0b8 --- /dev/null +++ b/scripts/lib/mic/utils/Fiemap.py | |||
| @@ -0,0 +1,252 @@ | |||
| 1 | """ This module implements python API for the FIEMAP ioctl. The FIEMAP ioctl | ||
| 2 | allows to find holes and mapped areas in a file. """ | ||
| 3 | |||
| 4 | # Note, a lot of code in this module is not very readable, because it deals | ||
| 5 | # with the rather complex FIEMAP ioctl. To understand the code, you need to | ||
| 6 | # know the FIEMAP interface, which is documented in the | ||
| 7 | # Documentation/filesystems/fiemap.txt file in the Linux kernel sources. | ||
| 8 | |||
| 9 | # Disable the following pylint recommendations: | ||
| 10 | # * Too many instance attributes (R0902) | ||
| 11 | # pylint: disable=R0902 | ||
| 12 | |||
| 13 | import os | ||
| 14 | import struct | ||
| 15 | import array | ||
| 16 | import fcntl | ||
| 17 | from mic.utils.misc import get_block_size | ||
| 18 | |||
# Format string for 'struct fiemap' (fm_start, fm_length, fm_flags,
# fm_mapped_extents, fm_extent_count, fm_reserved) - native byte order,
# no padding ('=')
_FIEMAP_FORMAT = "=QQLLLL"
# sizeof(struct fiemap)
_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
# Format string for 'struct fiemap_extent' (fe_logical, fe_physical,
# fe_length, 2 reserved u64s, fe_flags, 3 reserved u32s)
_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
# sizeof(struct fiemap_extent)
_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
# The FIEMAP ioctl number
_FIEMAP_IOCTL = 0xC020660B

# Minimum buffer which is required for 'class Fiemap' to operate:
# one 'struct fiemap' header plus room for at least one extent
MIN_BUFFER_SIZE = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE
# The default buffer size for 'class Fiemap'
DEFAULT_BUFFER_SIZE = 256 * 1024
| 34 | |||
class Error(Exception):
    """ The only exception type used by this module: carries a
    human-readable description of whatever went wrong. """
| 40 | |||
class Fiemap:
    """ This class provides API to the FIEMAP ioctl. Namely, it allows to
    iterate over all mapped blocks and over all holes. """

    def _open_image_file(self):
        """ Open the image file in binary mode and remember that this object
        owns the file handle (so '__del__()' closes it). """

        try:
            self._f_image = open(self._image_path, 'rb')
        except IOError as err:
            raise Error("cannot open image file '%s': %s" \
                        % (self._image_path, err))

        self._f_image_needs_close = True

    def __init__(self, image, buf_size = DEFAULT_BUFFER_SIZE):
        """ Initialize a class instance. The 'image' argument is full path to
        the file to operate on, or a file object to operate on.

        The 'buf_size' argument is the size of the buffer for 'struct
        fiemap_extent' elements which will be used when invoking the FIEMAP
        ioctl. The larger is the buffer, the less times the FIEMAP ioctl will
        be invoked.

        Raises 'Error' if the file cannot be opened/flushed/synced, if
        'buf_size' is too small, or if the FIEMAP ioctl is unsupported. """

        self._f_image_needs_close = False

        if hasattr(image, "fileno"):
            self._f_image = image
            self._image_path = image.name
        else:
            self._image_path = image
            self._open_image_file()

        # Validate 'buf_size'
        if buf_size < MIN_BUFFER_SIZE:
            raise Error("too small buffer (%d bytes), minimum is %d bytes" \
                        % (buf_size, MIN_BUFFER_SIZE))

        # How many 'struct fiemap_extent' elements fit the buffer; use floor
        # division so the count stays an integer on Python 3 as well
        buf_size -= _FIEMAP_SIZE
        self._fiemap_extent_cnt = buf_size // _FIEMAP_EXTENT_SIZE
        self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
        self._buf_size += _FIEMAP_SIZE

        # Allocate a mutable buffer for the FIEMAP ioctl
        self._buf = array.array('B', [0] * self._buf_size)

        self.image_size = os.fstat(self._f_image.fileno()).st_size

        try:
            self.block_size = get_block_size(self._f_image)
        except IOError as err:
            raise Error("cannot get block size for '%s': %s" \
                        % (self._image_path, err))

        # Count of blocks, rounded up to cover a partial trailing block
        self.blocks_cnt = \
            (self.image_size + self.block_size - 1) // self.block_size

        # Synchronize the image file to make sure FIEMAP returns correct values
        try:
            self._f_image.flush()
        except IOError as err:
            raise Error("cannot flush image file '%s': %s" \
                        % (self._image_path, err))
        try:
            os.fsync(self._f_image.fileno())
        except OSError as err:
            raise Error("cannot synchronize image file '%s': %s " \
                        % (self._image_path, err.strerror))

        # Check if the FIEMAP ioctl is supported
        self.block_is_mapped(0)

    def __del__(self):
        """ The class destructor which closes the opened files. """

        if self._f_image_needs_close:
            self._f_image.close()

    def _invoke_fiemap(self, block, count):
        """ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
        block number 'block'.

        The full result of the operation is stored in 'self._buf' on exit.
        Returns the unpacked 'struct fiemap' data structure in form of a python
        list (just like 'struct.unpack()'). """

        # The 'errno' module replaces the accidental 'os.errno' alias, which
        # does not exist in modern Python versions
        import errno

        if block < 0 or block >= self.blocks_cnt:
            raise Error("bad block number %d, should be within [0, %d]" \
                        % (block, self.blocks_cnt))

        # Initialize the 'struct fiemap' part of the buffer
        struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
                         count * self.block_size, 0, 0,
                         self._fiemap_extent_cnt, 0)

        try:
            fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
        except IOError as err:
            error_msg = "the FIEMAP ioctl failed for '%s': %s" \
                        % (self._image_path, err)
            if err.errno == errno.EPERM or err.errno == errno.EACCES:
                # The FIEMAP ioctl was added in kernel version 2.6.28 in 2008
                error_msg += " (looks like your kernel does not support FIEMAP)"

            raise Error(error_msg)

        return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])

    def block_is_mapped(self, block):
        """ This function returns 'True' if block number 'block' of the image
        file is mapped and 'False' otherwise. """

        struct_fiemap = self._invoke_fiemap(block, 1)

        # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
        # If it contains zero, the block is not mapped, otherwise it is
        # mapped.
        return bool(struct_fiemap[3])

    def block_is_unmapped(self, block):
        """ This function returns 'True' if block number 'block' of the image
        file is not mapped (hole) and 'False' otherwise. """

        return not self.block_is_mapped(block)

    def _unpack_fiemap_extent(self, index):
        """ Unpack a 'struct fiemap_extent' structure object number 'index'
        from the internal 'self._buf' buffer. """

        offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
        return struct.unpack(_FIEMAP_EXTENT_FORMAT,
                             self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])

    def _do_get_mapped_ranges(self, start, count):
        """ Implements most of the functionality for the 'get_mapped_ranges()'
        generator: invokes the FIEMAP ioctl, walks through the mapped
        extents and yields mapped block ranges. However, the ranges may be
        consecutive (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()'
        simply merges them. """

        block = start
        while block < start + count:
            struct_fiemap = self._invoke_fiemap(block, count)

            mapped_extents = struct_fiemap[3]
            if mapped_extents == 0:
                # No more mapped blocks
                return

            extent = 0
            while extent < mapped_extents:
                fiemap_extent = self._unpack_fiemap_extent(extent)

                # Start of the extent
                extent_start = fiemap_extent[0]
                # Starting block number of the extent (floor division keeps
                # the result an integer on Python 3 too)
                extent_block = extent_start // self.block_size
                # Length of the extent
                extent_len = fiemap_extent[2]
                # Count of blocks in the extent
                extent_count = extent_len // self.block_size

                # Extent length and offset have to be block-aligned
                assert extent_start % self.block_size == 0
                assert extent_len % self.block_size == 0

                if extent_block > start + count - 1:
                    return

                # Clamp the extent to the requested [start, start+count) area
                first = max(extent_block, block)
                last = min(extent_block + extent_count, start + count) - 1
                yield (first, last)

                extent += 1

            block = extent_block + extent_count

    def get_mapped_ranges(self, start, count):
        """ A generator which yields ranges of mapped blocks in the file. The
        ranges are tuples of 2 elements: [first, last], where 'first' is the
        first mapped block and 'last' is the last mapped block.

        The ranges are yielded for the area of the file of size 'count' blocks,
        starting from block 'start'. """

        iterator = self._do_get_mapped_ranges(start, count)

        # Use the built-in 'next()' (the '.next()' method is Python 2-only)
        # and guard against StopIteration explicitly: letting it escape here
        # would raise RuntimeError on Python 3.7+ (PEP 479) when the file has
        # no mapped ranges at all.
        try:
            first_prev, last_prev = next(iterator)
        except StopIteration:
            return

        for first, last in iterator:
            if last_prev == first - 1:
                # Adjacent to the previous range - merge them
                last_prev = last
            else:
                yield (first_prev, last_prev)
                first_prev, last_prev = first, last

        yield (first_prev, last_prev)

    def get_unmapped_ranges(self, start, count):
        """ Just like 'get_mapped_ranges()', but yields unmapped block ranges
        instead (holes). """

        hole_first = start
        for first, last in self._do_get_mapped_ranges(start, count):
            if first > hole_first:
                yield (hole_first, first - 1)

            hole_first = last + 1

        # Trailing hole after the last mapped range, if any
        if hole_first < start + count:
            yield (hole_first, start + count - 1)
diff --git a/scripts/lib/mic/utils/__init__.py b/scripts/lib/mic/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/lib/mic/utils/__init__.py | |||
diff --git a/scripts/lib/mic/utils/cmdln.py b/scripts/lib/mic/utils/cmdln.py new file mode 100644 index 0000000000..b099473ee4 --- /dev/null +++ b/scripts/lib/mic/utils/cmdln.py | |||
| @@ -0,0 +1,1586 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | # Copyright (c) 2002-2007 ActiveState Software Inc. | ||
| 3 | # License: MIT (see LICENSE.txt for license details) | ||
| 4 | # Author: Trent Mick | ||
| 5 | # Home: http://trentm.com/projects/cmdln/ | ||
| 6 | |||
| 7 | """An improvement on Python's standard cmd.py module. | ||
| 8 | |||
| 9 | As with cmd.py, this module provides "a simple framework for writing | ||
| 10 | line-oriented command intepreters." This module provides a 'RawCmdln' | ||
| 11 | class that fixes some design flaws in cmd.Cmd, making it more scalable | ||
| 12 | and nicer to use for good 'cvs'- or 'svn'-style command line interfaces | ||
| 13 | or simple shells. And it provides a 'Cmdln' class that add | ||
| 14 | optparse-based option processing. Basically you use it like this: | ||
| 15 | |||
| 16 | import cmdln | ||
| 17 | |||
| 18 | class MySVN(cmdln.Cmdln): | ||
| 19 | name = "svn" | ||
| 20 | |||
| 21 | @cmdln.alias('stat', 'st') | ||
| 22 | @cmdln.option('-v', '--verbose', action='store_true' | ||
| 23 | help='print verbose information') | ||
| 24 | def do_status(self, subcmd, opts, *paths): | ||
| 25 | print "handle 'svn status' command" | ||
| 26 | |||
| 27 | #... | ||
| 28 | |||
| 29 | if __name__ == "__main__": | ||
| 30 | shell = MySVN() | ||
| 31 | retval = shell.main() | ||
| 32 | sys.exit(retval) | ||
| 33 | |||
| 34 | See the README.txt or <http://trentm.com/projects/cmdln/> for more | ||
| 35 | details. | ||
| 36 | """ | ||
| 37 | |||
| 38 | __version_info__ = (1, 1, 2) | ||
| 39 | __version__ = '.'.join(map(str, __version_info__)) | ||
| 40 | |||
| 41 | import os | ||
| 42 | import sys | ||
| 43 | import re | ||
| 44 | import cmd | ||
| 45 | import optparse | ||
| 46 | from pprint import pprint | ||
| 47 | import sys | ||
| 48 | |||
| 49 | |||
| 50 | |||
| 51 | |||
#---- globals

# Interactive-loop policies accepted by the 'loop' argument of
# 'RawCmdln.main()'
LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)

# An unspecified optional argument when None is a meaningful value.
_NOT_SPECIFIED = ("Not", "Specified")

# Pattern to match a TypeError message from a call that
# failed because of incorrect number of arguments (see
# Python/getargs.c).
_INCORRECT_NUM_ARGS_RE = re.compile(
    r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
| 64 | |||
| 65 | |||
| 66 | |||
| 67 | #---- exceptions | ||
| 68 | |||
class CmdlnError(Exception):
    """A usage error in the cmdln.py framework itself.

    The message is stored on the 'msg' attribute and is what 'str()' of
    the exception returns.
    """

    def __init__(self, msg):
        # Keep the message on an attribute so callers can read it directly.
        self.msg = msg

    def __str__(self):
        return self.msg
| 75 | |||
class CmdlnUserError(Exception):
    """Raised for mistakes made by the *user* of a cmdln-based tool or
    shell (as opposed to errors in the framework usage itself)."""
| 79 | |||
| 80 | |||
| 81 | |||
| 82 | #---- public methods and classes | ||
| 83 | |||
def alias(*aliases):
    """Decorator to add aliases for Cmdln.do_* command handlers.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.alias("!", "sh")
            def do_shell(self, argv):
                #...implement 'shell' command

    Stacked applications accumulate: each use appends its names to the
    handler's 'aliases' list.
    """
    def decorate(func):
        existing = getattr(func, "aliases", None)
        if existing is None:
            func.aliases = list(aliases)
        else:
            existing.extend(aliases)
        return func
    return decorate
| 99 | |||
| 100 | |||
| 101 | class RawCmdln(cmd.Cmd): | ||
| 102 | """An improved (on cmd.Cmd) framework for building multi-subcommand | ||
| 103 | scripts (think "svn" & "cvs") and simple shells (think "pdb" and | ||
| 104 | "gdb"). | ||
| 105 | |||
| 106 | A simple example: | ||
| 107 | |||
| 108 | import cmdln | ||
| 109 | |||
| 110 | class MySVN(cmdln.RawCmdln): | ||
| 111 | name = "svn" | ||
| 112 | |||
| 113 | @cmdln.aliases('stat', 'st') | ||
| 114 | def do_status(self, argv): | ||
| 115 | print "handle 'svn status' command" | ||
| 116 | |||
| 117 | if __name__ == "__main__": | ||
| 118 | shell = MySVN() | ||
| 119 | retval = shell.main() | ||
| 120 | sys.exit(retval) | ||
| 121 | |||
| 122 | See <http://trentm.com/projects/cmdln> for more information. | ||
| 123 | """ | ||
| 124 | name = None # if unset, defaults basename(sys.argv[0]) | ||
| 125 | prompt = None # if unset, defaults to self.name+"> " | ||
| 126 | version = None # if set, default top-level options include --version | ||
| 127 | |||
| 128 | # Default messages for some 'help' command error cases. | ||
| 129 | # They are interpolated with one arg: the command. | ||
| 130 | nohelp = "no help on '%s'" | ||
| 131 | unknowncmd = "unknown command: '%s'" | ||
| 132 | |||
| 133 | helpindent = '' # string with which to indent help output | ||
| 134 | |||
| 135 | def __init__(self, completekey='tab', | ||
| 136 | stdin=None, stdout=None, stderr=None): | ||
| 137 | """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None) | ||
| 138 | |||
| 139 | The optional argument 'completekey' is the readline name of a | ||
| 140 | completion key; it defaults to the Tab key. If completekey is | ||
| 141 | not None and the readline module is available, command completion | ||
| 142 | is done automatically. | ||
| 143 | |||
| 144 | The optional arguments 'stdin', 'stdout' and 'stderr' specify | ||
| 145 | alternate input, output and error output file objects; if not | ||
| 146 | specified, sys.* are used. | ||
| 147 | |||
| 148 | If 'stdout' but not 'stderr' is specified, stdout is used for | ||
| 149 | error output. This is to provide least surprise for users used | ||
| 150 | to only the 'stdin' and 'stdout' options with cmd.Cmd. | ||
| 151 | """ | ||
| 152 | import sys | ||
| 153 | if self.name is None: | ||
| 154 | self.name = os.path.basename(sys.argv[0]) | ||
| 155 | if self.prompt is None: | ||
| 156 | self.prompt = self.name+"> " | ||
| 157 | self._name_str = self._str(self.name) | ||
| 158 | self._prompt_str = self._str(self.prompt) | ||
| 159 | if stdin is not None: | ||
| 160 | self.stdin = stdin | ||
| 161 | else: | ||
| 162 | self.stdin = sys.stdin | ||
| 163 | if stdout is not None: | ||
| 164 | self.stdout = stdout | ||
| 165 | else: | ||
| 166 | self.stdout = sys.stdout | ||
| 167 | if stderr is not None: | ||
| 168 | self.stderr = stderr | ||
| 169 | elif stdout is not None: | ||
| 170 | self.stderr = stdout | ||
| 171 | else: | ||
| 172 | self.stderr = sys.stderr | ||
| 173 | self.cmdqueue = [] | ||
| 174 | self.completekey = completekey | ||
| 175 | self.cmdlooping = False | ||
| 176 | |||
| 177 | def get_optparser(self): | ||
| 178 | """Hook for subclasses to set the option parser for the | ||
| 179 | top-level command/shell. | ||
| 180 | |||
| 181 | This option parser is used retrieved and used by `.main()' to | ||
| 182 | handle top-level options. | ||
| 183 | |||
| 184 | The default implements a single '-h|--help' option. Sub-classes | ||
| 185 | can return None to have no options at the top-level. Typically | ||
| 186 | an instance of CmdlnOptionParser should be returned. | ||
| 187 | """ | ||
| 188 | version = (self.version is not None | ||
| 189 | and "%s %s" % (self._name_str, self.version) | ||
| 190 | or None) | ||
| 191 | return CmdlnOptionParser(self, version=version) | ||
| 192 | |||
| 193 | def postoptparse(self): | ||
| 194 | """Hook method executed just after `.main()' parses top-level | ||
| 195 | options. | ||
| 196 | |||
| 197 | When called `self.options' holds the results of the option parse. | ||
| 198 | """ | ||
| 199 | pass | ||
| 200 | |||
    def main(self, argv=None, loop=LOOP_NEVER):
        """A possible mainline handler for a script, like so:

            import cmdln
            class MyCmd(cmdln.Cmdln):
                name = "mycmd"
                ...

            if __name__ == "__main__":
                MyCmd().main()

        By default this will use sys.argv to issue a single command to
        'MyCmd', then exit. The 'loop' argument can be used to control
        interactive shell behaviour.

        Arguments:
            "argv" (optional, default sys.argv) is the command to run.
                It must be a sequence, where the first element is the
                command name and subsequent elements the args for that
                command.
            "loop" (optional, default LOOP_NEVER) is a constant
                indicating if a command loop should be started (i.e. an
                interactive shell). Valid values (constants on this module):
                    LOOP_ALWAYS     start loop and run "argv", if any
                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
                    LOOP_IF_EMPTY   run "argv", if given, and exit;
                                    otherwise, start loop
        """
        if argv is None:
            import sys
            argv = sys.argv
        else:
            argv = argv[:] # don't modify caller's list

        self.optparser = self.get_optparser()
        if self.optparser: # i.e. optparser=None means don't process for opts
            try:
                self.options, args = self.optparser.parse_args(argv[1:])
            except CmdlnUserError, ex:
                # User error in the top-level options: report and bail.
                msg = "%s: %s\nTry '%s help' for info.\n"\
                      % (self.name, ex, self.name)
                self.stderr.write(self._str(msg))
                self.stderr.flush()
                return 1
            except StopOptionProcessing, ex:
                # Clean termination of option processing, e.g. '--help'
                # or '--version' was given and has been handled.
                return 0
        else:
            self.options, args = None, argv[1:]
        self.postoptparse()

        # Dispatch according to the requested looping behaviour.
        if loop == LOOP_ALWAYS:
            if args:
                self.cmdqueue.append(args)
            return self.cmdloop()
        elif loop == LOOP_NEVER:
            if args:
                return self.cmd(args)
            else:
                return self.emptyline()
        elif loop == LOOP_IF_EMPTY:
            if args:
                return self.cmd(args)
            else:
                return self.cmdloop()
| 265 | |||
    def cmd(self, argv):
        """Run one command and exit.

        "argv" is the arglist for the command to run. argv[0] is the
            command to run. If argv is an empty list then the
            'emptyline' handler is run.

        Returns the return value from the command handler.
        """
        assert isinstance(argv, (list, tuple)), \
               "'argv' is not a sequence: %r" % argv
        retval = None
        try:
            argv = self.precmd(argv)
            retval = self.onecmd(argv)
            self.postcmd(argv)
        except:
            # Give .cmdexc() first crack at the error; re-raise anything
            # it does not claim to have handled, otherwise report an
            # error exit status.
            if not self.cmdexc(argv):
                raise
            retval = 1
        return retval
| 287 | |||
| 288 | def _str(self, s): | ||
| 289 | """Safely convert the given str/unicode to a string for printing.""" | ||
| 290 | try: | ||
| 291 | return str(s) | ||
| 292 | except UnicodeError: | ||
| 293 | #XXX What is the proper encoding to use here? 'utf-8' seems | ||
| 294 | # to work better than "getdefaultencoding" (usually | ||
| 295 | # 'ascii'), on OS X at least. | ||
| 296 | #import sys | ||
| 297 | #return s.encode(sys.getdefaultencoding(), "replace") | ||
| 298 | return s.encode("utf-8", "replace") | ||
| 299 | |||
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse into an argv, and
        dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
        the argv. In other words, start a shell.

            "intro" (optional) is a introductory message to print when
                starting the command loop. This overrides the class
                "intro" attribute, if any.
        """
        self.cmdlooping = True
        self.preloop()
        if self.use_rawinput and self.completekey:
            # Install our completer for the duration of the loop; the
            # previous completer is restored in the 'finally' below.
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is None:
                intro = self.intro
            if intro:
                intro_str = self._str(intro)
                self.stdout.write(intro_str+'\n')
            self.stop = False
            retval = None
            while not self.stop:
                # Queued commands (e.g. pushed by .main()) take
                # precedence over reading interactive input.
                if self.cmdqueue:
                    argv = self.cmdqueue.pop(0)
                    assert isinstance(argv, (list, tuple)), \
                           "item on 'cmdqueue' is not a sequence: %r" % argv
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self._prompt_str)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self._prompt_str)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # Empty read means end-of-input: synthesize
                            # the 'EOF' command.
                            line = 'EOF'
                        else:
                            line = line[:-1] # chop '\n'
                    argv = line2argv(line)
                try:
                    argv = self.precmd(argv)
                    retval = self.onecmd(argv)
                    self.postcmd(argv)
                except:
                    # Let .cmdexc() decide; re-raise unhandled errors.
                    if not self.cmdexc(argv):
                        raise
                    retval = 1
                self.lastretval = retval
            self.postloop()
        finally:
            # Restore whatever readline completer was active before.
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
        self.cmdlooping = False
        return retval
| 366 | |||
| 367 | def precmd(self, argv): | ||
| 368 | """Hook method executed just before the command argv is | ||
| 369 | interpreted, but after the input prompt is generated and issued. | ||
| 370 | |||
| 371 | "argv" is the cmd to run. | ||
| 372 | |||
| 373 | Returns an argv to run (i.e. this method can modify the command | ||
| 374 | to run). | ||
| 375 | """ | ||
| 376 | return argv | ||
| 377 | |||
| 378 | def postcmd(self, argv): | ||
| 379 | """Hook method executed just after a command dispatch is finished. | ||
| 380 | |||
| 381 | "argv" is the command that was run. | ||
| 382 | """ | ||
| 383 | pass | ||
| 384 | |||
| 385 | def cmdexc(self, argv): | ||
| 386 | """Called if an exception is raised in any of precmd(), onecmd(), | ||
| 387 | or postcmd(). If True is returned, the exception is deemed to have | ||
| 388 | been dealt with. Otherwise, the exception is re-raised. | ||
| 389 | |||
| 390 | The default implementation handles CmdlnUserError's, which | ||
| 391 | typically correspond to user error in calling commands (as | ||
| 392 | opposed to programmer error in the design of the script using | ||
| 393 | cmdln.py). | ||
| 394 | """ | ||
| 395 | import sys | ||
| 396 | type, exc, traceback = sys.exc_info() | ||
| 397 | if isinstance(exc, CmdlnUserError): | ||
| 398 | msg = "%s %s: %s\nTry '%s help %s' for info.\n"\ | ||
| 399 | % (self.name, argv[0], exc, self.name, argv[0]) | ||
| 400 | self.stderr.write(self._str(msg)) | ||
| 401 | self.stderr.flush() | ||
| 402 | return True | ||
| 403 | |||
| 404 | def onecmd(self, argv): | ||
| 405 | if not argv: | ||
| 406 | return self.emptyline() | ||
| 407 | self.lastcmd = argv | ||
| 408 | cmdname = self._get_canonical_cmd_name(argv[0]) | ||
| 409 | if cmdname: | ||
| 410 | handler = self._get_cmd_handler(cmdname) | ||
| 411 | if handler: | ||
| 412 | return self._dispatch_cmd(handler, argv) | ||
| 413 | return self.default(argv) | ||
| 414 | |||
| 415 | def _dispatch_cmd(self, handler, argv): | ||
| 416 | return handler(argv) | ||
| 417 | |||
    def default(self, argv):
        """Hook called to handle a command for which there is no handler.

        "argv" is the command and arguments to run.

        The default implementation writes an error message to stderr
        and returns an error exit status.

        Returns a numeric command exit status.
        """
        errmsg = self._str(self.unknowncmd % (argv[0],))
        if self.cmdlooping:
            # Inside the interactive shell a bare message is enough.
            self.stderr.write(errmsg+"\n")
        else:
            self.stderr.write("%s: %s\nTry '%s help' for info.\n"
                              % (self._name_str, errmsg, self._name_str))
        self.stderr.flush()
        return 1
| 436 | |||
| 437 | def parseline(self, line): | ||
| 438 | # This is used by Cmd.complete (readline completer function) to | ||
| 439 | # massage the current line buffer before completion processing. | ||
| 440 | # We override to drop special '!' handling. | ||
| 441 | line = line.strip() | ||
| 442 | if not line: | ||
| 443 | return None, None, line | ||
| 444 | elif line[0] == '?': | ||
| 445 | line = 'help ' + line[1:] | ||
| 446 | i, n = 0, len(line) | ||
| 447 | while i < n and line[i] in self.identchars: i = i+1 | ||
| 448 | cmd, arg = line[:i], line[i:].strip() | ||
| 449 | return cmd, arg, line | ||
| 450 | |||
| 451 | def helpdefault(self, cmd, known): | ||
| 452 | """Hook called to handle help on a command for which there is no | ||
| 453 | help handler. | ||
| 454 | |||
| 455 | "cmd" is the command name on which help was requested. | ||
| 456 | "known" is a boolean indicating if this command is known | ||
| 457 | (i.e. if there is a handler for it). | ||
| 458 | |||
| 459 | Returns a return code. | ||
| 460 | """ | ||
| 461 | if known: | ||
| 462 | msg = self._str(self.nohelp % (cmd,)) | ||
| 463 | if self.cmdlooping: | ||
| 464 | self.stderr.write(msg + '\n') | ||
| 465 | else: | ||
| 466 | self.stderr.write("%s: %s\n" % (self.name, msg)) | ||
| 467 | else: | ||
| 468 | msg = self.unknowncmd % (cmd,) | ||
| 469 | if self.cmdlooping: | ||
| 470 | self.stderr.write(msg + '\n') | ||
| 471 | else: | ||
| 472 | self.stderr.write("%s: %s\n" | ||
| 473 | "Try '%s help' for info.\n" | ||
| 474 | % (self.name, msg, self.name)) | ||
| 475 | self.stderr.flush() | ||
| 476 | return 1 | ||
| 477 | |||
    def do_help(self, argv):
        """${cmd_name}: give detailed help on a specific sub-command

        Usage:
            ${name} help [COMMAND]
        """
        if len(argv) > 1: # asking for help on a particular command
            doc = None
            cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
            if not cmdname:
                return self.helpdefault(argv[1], False)
            else:
                # An explicit 'help_FOO' method beats the handler's
                # docstring.
                helpfunc = getattr(self, "help_"+cmdname, None)
                if helpfunc:
                    doc = helpfunc()
                else:
                    handler = self._get_cmd_handler(cmdname)
                    if handler:
                        doc = handler.__doc__
                    if doc is None:
                        return self.helpdefault(argv[1], handler != None)
        else: # bare "help" command
            doc = self.__class__.__doc__  # try class docstring
            if doc is None:
                # Try to provide some reasonable useful default help.
                if self.cmdlooping: prefix = ""
                else:               prefix = self.name+' '
                doc = """Usage:
                    %sCOMMAND [ARGS...]
                    %shelp [COMMAND]

                    ${option_list}
                    ${command_list}
                    ${help_list}
                    """ % (prefix, prefix)
            cmdname = None

        if doc: # *do* have help content, massage and print that
            doc = self._help_reindent(doc)
            doc = self._help_preprocess(doc, cmdname)
            doc = doc.rstrip() + '\n' # trim down trailing space
            self.stdout.write(self._str(doc))
            self.stdout.flush()
    # '?' is an alias for 'help' (picked up by _get_canonical_map).
    do_help.aliases = ["?"]
| 522 | |||
| 523 | def _help_reindent(self, help, indent=None): | ||
| 524 | """Hook to re-indent help strings before writing to stdout. | ||
| 525 | |||
| 526 | "help" is the help content to re-indent | ||
| 527 | "indent" is a string with which to indent each line of the | ||
| 528 | help content after normalizing. If unspecified or None | ||
| 529 | then the default is use: the 'self.helpindent' class | ||
| 530 | attribute. By default this is the empty string, i.e. | ||
| 531 | no indentation. | ||
| 532 | |||
| 533 | By default, all common leading whitespace is removed and then | ||
| 534 | the lot is indented by 'self.helpindent'. When calculating the | ||
| 535 | common leading whitespace the first line is ignored -- hence | ||
| 536 | help content for Conan can be written as follows and have the | ||
| 537 | expected indentation: | ||
| 538 | |||
| 539 | def do_crush(self, ...): | ||
| 540 | '''${cmd_name}: crush your enemies, see them driven before you... | ||
| 541 | |||
| 542 | c.f. Conan the Barbarian''' | ||
| 543 | """ | ||
| 544 | if indent is None: | ||
| 545 | indent = self.helpindent | ||
| 546 | lines = help.splitlines(0) | ||
| 547 | _dedentlines(lines, skip_first_line=True) | ||
| 548 | lines = [(indent+line).rstrip() for line in lines] | ||
| 549 | return '\n'.join(lines) | ||
| 550 | |||
    def _help_preprocess(self, help, cmdname):
        """Hook to preprocess a help string before writing to stdout.

            "help" is the help string to process.
            "cmdname" is the canonical sub-command name for which help
                is being given, or None if the help is not specific to a
                command.

        By default the following template variables are interpolated in
        help content. (Note: these are similar to Python 2.4's
        string.Template interpolation but not quite.)

        ${name}
            The tool's/shell's name, i.e. 'self.name'.
        ${option_list}
            A formatted table of options for this shell/tool.
        ${command_list}
            A formatted table of available sub-commands.
        ${help_list}
            A formatted table of additional help topics (i.e. 'help_*'
            methods with no matching 'do_*' method).
        ${cmd_name}
            The name (and aliases) for this sub-command formatted as:
            "NAME (ALIAS1, ALIAS2, ...)".
        ${cmd_usage}
            A formatted usage block inferred from the command function
            signature.
        ${cmd_option_list}
            A formatted table of options for this sub-command. (This is
            only available for commands using the optparse integration,
            i.e. using @cmdln.option decorators or manually setting the
            'optparser' attribute on the 'do_*' method.)

        Returns the processed help.
        """
        preprocessors = {
            "${name}": self._help_preprocess_name,
            "${option_list}": self._help_preprocess_option_list,
            "${command_list}": self._help_preprocess_command_list,
            "${help_list}": self._help_preprocess_help_list,
            "${cmd_name}": self._help_preprocess_cmd_name,
            "${cmd_usage}": self._help_preprocess_cmd_usage,
            "${cmd_option_list}": self._help_preprocess_cmd_option_list,
        }

        # Each marker is tested against the *current* (possibly already
        # expanded) help text, so one preprocessor's output may contain
        # markers that a later iteration expands.
        for marker, preprocessor in preprocessors.items():
            if marker in help:
                help = preprocessor(help, cmdname)
        return help
| 600 | |||
| 601 | def _help_preprocess_name(self, help, cmdname=None): | ||
| 602 | return help.replace("${name}", self.name) | ||
| 603 | |||
| 604 | def _help_preprocess_option_list(self, help, cmdname=None): | ||
| 605 | marker = "${option_list}" | ||
| 606 | indent, indent_width = _get_indent(marker, help) | ||
| 607 | suffix = _get_trailing_whitespace(marker, help) | ||
| 608 | |||
| 609 | if self.optparser: | ||
| 610 | # Setup formatting options and format. | ||
| 611 | # - Indentation of 4 is better than optparse default of 2. | ||
| 612 | # C.f. Damian Conway's discussion of this in Perl Best | ||
| 613 | # Practices. | ||
| 614 | self.optparser.formatter.indent_increment = 4 | ||
| 615 | self.optparser.formatter.current_indent = indent_width | ||
| 616 | block = self.optparser.format_option_help() + '\n' | ||
| 617 | else: | ||
| 618 | block = "" | ||
| 619 | |||
| 620 | help = help.replace(indent+marker+suffix, block, 1) | ||
| 621 | return help | ||
| 622 | |||
| 623 | |||
    def _help_preprocess_command_list(self, help, cmdname=None):
        # Expand the ${command_list} marker with a formatted table of
        # all (non-hidden) sub-commands, their aliases and one-line
        # summaries.
        marker = "${command_list}"
        indent, indent_width = _get_indent(marker, help)
        suffix = _get_trailing_whitespace(marker, help)

        # Find any aliases for commands.
        # NOTE(review): the loop variable below shadows the 'cmdname'
        # parameter (which is otherwise unused in this method).
        token2canonical = self._get_canonical_map()
        aliases = {}
        for token, cmdname in token2canonical.items():
            if token == cmdname: continue
            aliases.setdefault(cmdname, []).append(token)

        # Get the list of (non-hidden) commands and their
        # documentation, if any.
        cmdnames = {} # use a dict to strip duplicates
        for attr in self.get_names():
            if attr.startswith("do_"):
                cmdnames[attr[3:]] = True
        cmdnames = cmdnames.keys()
        cmdnames.sort()
        linedata = []
        for cmdname in cmdnames:
            if aliases.get(cmdname):
                a = aliases[cmdname]
                a.sort()
                cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
            else:
                cmdstr = cmdname
            doc = None
            # Prefer an explicit 'help_FOO' method; fall back to the
            # handler's docstring.
            try:
                helpfunc = getattr(self, 'help_'+cmdname)
            except AttributeError:
                handler = self._get_cmd_handler(cmdname)
                if handler:
                    doc = handler.__doc__
            else:
                doc = helpfunc()

            # Strip "${cmd_name}: " from the start of a command's doc. Best
            # practice dictates that command help strings begin with this, but
            # it isn't at all wanted for the command list.
            to_strip = "${cmd_name}:"
            if doc and doc.startswith(to_strip):
                #log.debug("stripping %r from start of %s's help string",
                #          to_strip, cmdname)
                doc = doc[len(to_strip):].lstrip()
            linedata.append( (cmdstr, doc) )

        if linedata:
            subindent = indent + ' '*4
            lines = _format_linedata(linedata, subindent, indent_width+4)
            block = indent + "Commands:\n" \
                    + '\n'.join(lines) + "\n\n"
            help = help.replace(indent+marker+suffix, block, 1)
        return help
| 679 | |||
| 680 | def _gen_names_and_attrs(self): | ||
| 681 | # Inheritance says we have to look in class and | ||
| 682 | # base classes; order is not important. | ||
| 683 | names = [] | ||
| 684 | classes = [self.__class__] | ||
| 685 | while classes: | ||
| 686 | aclass = classes.pop(0) | ||
| 687 | if aclass.__bases__: | ||
| 688 | classes = classes + list(aclass.__bases__) | ||
| 689 | for name in dir(aclass): | ||
| 690 | yield (name, getattr(aclass, name)) | ||
| 691 | |||
| 692 | def _help_preprocess_help_list(self, help, cmdname=None): | ||
| 693 | marker = "${help_list}" | ||
| 694 | indent, indent_width = _get_indent(marker, help) | ||
| 695 | suffix = _get_trailing_whitespace(marker, help) | ||
| 696 | |||
| 697 | # Determine the additional help topics, if any. | ||
| 698 | helpnames = {} | ||
| 699 | token2cmdname = self._get_canonical_map() | ||
| 700 | for attrname, attr in self._gen_names_and_attrs(): | ||
| 701 | if not attrname.startswith("help_"): continue | ||
| 702 | helpname = attrname[5:] | ||
| 703 | if helpname not in token2cmdname: | ||
| 704 | helpnames[helpname] = attr | ||
| 705 | |||
| 706 | if helpnames: | ||
| 707 | linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()] | ||
| 708 | linedata.sort() | ||
| 709 | |||
| 710 | subindent = indent + ' '*4 | ||
| 711 | lines = _format_linedata(linedata, subindent, indent_width+4) | ||
| 712 | block = (indent | ||
| 713 | + "Additional help topics (run `%s help TOPIC'):\n" % self.name | ||
| 714 | + '\n'.join(lines) | ||
| 715 | + "\n\n") | ||
| 716 | else: | ||
| 717 | block = '' | ||
| 718 | help = help.replace(indent+marker+suffix, block, 1) | ||
| 719 | return help | ||
| 720 | |||
| 721 | def _help_preprocess_cmd_name(self, help, cmdname=None): | ||
| 722 | marker = "${cmd_name}" | ||
| 723 | handler = self._get_cmd_handler(cmdname) | ||
| 724 | if not handler: | ||
| 725 | raise CmdlnError("cannot preprocess '%s' into help string: " | ||
| 726 | "could not find command handler for %r" | ||
| 727 | % (marker, cmdname)) | ||
| 728 | s = cmdname | ||
| 729 | if hasattr(handler, "aliases"): | ||
| 730 | s += " (%s)" % (", ".join(handler.aliases)) | ||
| 731 | help = help.replace(marker, s) | ||
| 732 | return help | ||
| 733 | |||
    #TODO: this only makes sense as part of the Cmdln class.
    #      Add hooks to add help preprocessing template vars and put
    #      this one on that class.
    def _help_preprocess_cmd_usage(self, help, cmdname=None):
        # Expand the ${cmd_usage} marker with a usage block inferred
        # from the handler function's signature.
        marker = "${cmd_usage}"
        handler = self._get_cmd_handler(cmdname)
        if not handler:
            raise CmdlnError("cannot preprocess '%s' into help string: "
                             "could not find command handler for %r"
                             % (marker, cmdname))
        indent, indent_width = _get_indent(marker, help)
        suffix = _get_trailing_whitespace(marker, help)

        # Extract the introspection bits we need.
        func = handler.im_func
        if func.func_defaults:
            func_defaults = list(func.func_defaults)
        else:
            func_defaults = []
        co_argcount = func.func_code.co_argcount
        co_varnames = func.func_code.co_varnames
        co_flags = func.func_code.co_flags
        # CPython code-object flag bits for *args / **kwargs.
        CO_FLAGS_ARGS = 4
        CO_FLAGS_KWARGS = 8

        # Adjust argcount for possible *args and **kwargs arguments.
        argcount = co_argcount
        if co_flags & CO_FLAGS_ARGS:   argcount += 1
        if co_flags & CO_FLAGS_KWARGS: argcount += 1

        # Determine the usage string.
        usage = "%s %s" % (self.name, cmdname)
        if argcount <= 2:   # handler ::= do_FOO(self, argv)
            usage += " [ARGS...]"
        elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
            argnames = list(co_varnames[3:argcount])
            tail = ""
            if co_flags & CO_FLAGS_KWARGS:
                name = argnames.pop(-1)
                import warnings
                # There is no generally accepted mechanism for passing
                # keyword arguments from the command line. Could
                # *perhaps* consider: arg=value arg2=value2 ...
                warnings.warn("argument '**%s' on '%s.%s' command "
                              "handler will never get values"
                              % (name, self.__class__.__name__,
                                 func.func_name))
            if co_flags & CO_FLAGS_ARGS:
                name = argnames.pop(-1)
                tail = "[%s...]" % name.upper()
            # Args with defaults render as optional: [NAME].
            while func_defaults:
                func_defaults.pop(-1)
                name = argnames.pop(-1)
                tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
            # Remaining args are required: NAME.
            while argnames:
                name = argnames.pop(-1)
                tail = "%s %s" % (name.upper(), tail)
            usage += ' ' + tail

        block_lines = [
            self.helpindent + "Usage:",
            self.helpindent + ' '*4 + usage
        ]
        block = '\n'.join(block_lines) + '\n\n'

        help = help.replace(indent+marker+suffix, block, 1)
        return help
| 801 | |||
| 802 | #TODO: this only makes sense as part of the Cmdln class. | ||
| 803 | # Add hooks to add help preprocessing template vars and put | ||
| 804 | # this one on that class. | ||
| 805 | def _help_preprocess_cmd_option_list(self, help, cmdname=None): | ||
| 806 | marker = "${cmd_option_list}" | ||
| 807 | handler = self._get_cmd_handler(cmdname) | ||
| 808 | if not handler: | ||
| 809 | raise CmdlnError("cannot preprocess '%s' into help string: " | ||
| 810 | "could not find command handler for %r" | ||
| 811 | % (marker, cmdname)) | ||
| 812 | indent, indent_width = _get_indent(marker, help) | ||
| 813 | suffix = _get_trailing_whitespace(marker, help) | ||
| 814 | if hasattr(handler, "optparser"): | ||
| 815 | # Setup formatting options and format. | ||
| 816 | # - Indentation of 4 is better than optparse default of 2. | ||
| 817 | # C.f. Damian Conway's discussion of this in Perl Best | ||
| 818 | # Practices. | ||
| 819 | handler.optparser.formatter.indent_increment = 4 | ||
| 820 | handler.optparser.formatter.current_indent = indent_width | ||
| 821 | block = handler.optparser.format_option_help() + '\n' | ||
| 822 | else: | ||
| 823 | block = "" | ||
| 824 | |||
| 825 | help = help.replace(indent+marker+suffix, block, 1) | ||
| 826 | return help | ||
| 827 | |||
| 828 | def _get_canonical_cmd_name(self, token): | ||
| 829 | map = self._get_canonical_map() | ||
| 830 | return map.get(token, None) | ||
| 831 | |||
    def _get_canonical_map(self):
        """Return a mapping of available command names and aliases to
        their canonical command name.
        """
        # Built once per instance, then cached under this attribute.
        cacheattr = "_token2canonical"
        if not hasattr(self, cacheattr):
            # Get the list of commands and their aliases, if any.
            token2canonical = {}
            cmd2funcname = {} # use a dict to strip duplicates
            for attr in self.get_names():
                if attr.startswith("do_"):    cmdname = attr[3:]
                elif attr.startswith("_do_"): cmdname = attr[4:]
                else:
                    continue
                cmd2funcname[cmdname] = attr
                token2canonical[cmdname] = cmdname
            for cmdname, funcname in cmd2funcname.items(): # add aliases
                func = getattr(self, funcname)
                aliases = getattr(func, "aliases", [])
                for alias in aliases:
                    if alias in cmd2funcname:
                        # An alias must not hide a real command handler:
                        # warn and keep the existing mapping.
                        import warnings
                        warnings.warn("'%s' alias for '%s' command conflicts "
                                      "with '%s' handler"
                                      % (alias, cmdname, cmd2funcname[alias]))
                        continue
                    token2canonical[alias] = cmdname
            setattr(self, cacheattr, token2canonical)
        return getattr(self, cacheattr)
| 861 | |||
| 862 | def _get_cmd_handler(self, cmdname): | ||
| 863 | handler = None | ||
| 864 | try: | ||
| 865 | handler = getattr(self, 'do_' + cmdname) | ||
| 866 | except AttributeError: | ||
| 867 | try: | ||
| 868 | # Private command handlers begin with "_do_". | ||
| 869 | handler = getattr(self, '_do_' + cmdname) | ||
| 870 | except AttributeError: | ||
| 871 | pass | ||
| 872 | return handler | ||
| 873 | |||
    def _do_EOF(self, argv):
        # Default EOF handler: emit a newline (so the prompt line is
        # terminated cleanly) and ask the command loop to stop.
        # Note: an actual EOF is redirected to this command.
        #TODO: separate name for this. Currently it is available from
        #      command-line. Is that okay?
        self.stdout.write('\n')
        self.stdout.flush()
        self.stop = True
| 882 | |||
| 883 | def emptyline(self): | ||
| 884 | # Different from cmd.Cmd: don't repeat the last command for an | ||
| 885 | # emptyline. | ||
| 886 | if self.cmdlooping: | ||
| 887 | pass | ||
| 888 | else: | ||
| 889 | return self.do_help(["help"]) | ||
| 890 | |||
| 891 | |||
| 892 | #---- optparse.py extension to fix (IMO) some deficiencies | ||
| 893 | # | ||
| 894 | # See the class _OptionParserEx docstring for details. | ||
| 895 | # | ||
| 896 | |||
class StopOptionProcessing(Exception):
    """Indicate that option *and argument* processing should stop
    cleanly. This is not an error condition. It is similar in spirit to
    StopIteration. This is raised by _OptionParserEx's default "help"
    and "version" option actions and can be raised by custom option
    callbacks too.

    Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
    usage is:

        parser = CmdlnOptionParser(mycmd)
        parser.add_option("-f", "--force", dest="force")
        ...
        try:
            opts, args = parser.parse_args()
        except StopOptionProcessing:
            # normal termination, "--help" was probably given
            sys.exit(0)
    """
| 916 | |||
class _OptionParserEx(optparse.OptionParser):
    """An optparse.OptionParser that uses exceptions instead of sys.exit.

    This class is an extension of optparse.OptionParser that differs
    as follows:
    - Correct (IMO) the default OptionParser error handling to never
      sys.exit(). Instead OptParseError exceptions are passed through.
    - Add the StopOptionProcessing exception (a la StopIteration) to
      indicate normal termination of option processing.
      See StopOptionProcessing's docstring for details.

    I'd also like to see the following in the core optparse.py, perhaps
    as a RawOptionParser which would serve as a base class for the more
    generally used OptionParser (that works as current):
    - Remove the implicit addition of the -h|--help and --version
      options. They can get in the way (e.g. if want '-?' and '-V' for
      these as well) and it is not hard to do:
        optparser.add_option("-h", "--help", action="help")
        optparser.add_option("--version", action="version")
      These are good practices, just not valid defaults if they can
      get in the way.
    """
    def error(self, msg):
        # Never sys.exit() on a user error; raise and let callers decide.
        raise optparse.OptParseError(msg)

    def exit(self, status=0, msg=None):
        # Zero status is normal termination (e.g. '--help'/'--version'
        # was handled); anything else is treated as a parse error.
        if status == 0:
            raise StopOptionProcessing(msg)
        else:
            #TODO: don't lose status info here
            raise optparse.OptParseError(msg)
| 948 | |||
| 949 | |||
| 950 | |||
| 951 | #---- optparse.py-based option processing support | ||
| 952 | |||
class CmdlnOptionParser(_OptionParserEx):
    """Option parser for the top-level options of a Cmdln instance.

    (Sub-command options are handled by SubCmdOptionParser.)

    Behavioural tweaks over _OptionParserEx:
      - interspersed args are disabled, because everything after the
        sub-command name belongs to that sub-command's own parser;
      - print_help() is routed through the Cmdln "help" handler, which
        is better equipped to render the full help;
      - error() raises CmdlnUserError, the well-known exception type
        for user mistakes, making error handling clearer for callers.
    """
    def __init__(self, cmdln, **kwargs):
        kwargs["prog"] = cmdln.name
        self.cmdln = cmdln
        _OptionParserEx.__init__(self, **kwargs)
        self.disable_interspersed_args()

    def print_help(self, file=None):
        # Delegate to the Cmdln help machinery.
        self.cmdln.onecmd(["help"])

    def error(self, msg):
        # Option-parsing problems are user errors.
        raise CmdlnUserError(msg)
| 979 | |||
| 980 | |||
class SubCmdOptionParser(_OptionParserEx):
    """Option parser for an individual sub-command's options.

    A plain _OptionParserEx plus hooks so that "help" output and user
    errors route through the owning Cmdln instance.
    """
    def set_cmdln_info(self, cmdln, subcmd):
        """Called by Cmdln to pass relevant info about itself needed
        for print_help().
        """
        self.cmdln = cmdln
        self.subcmd = subcmd

    def print_help(self, file=None):
        # Delegate to the Cmdln help machinery for this sub-command.
        self.cmdln.onecmd(["help", self.subcmd])

    def error(self, msg):
        # Option-parsing problems are user errors.
        raise CmdlnUserError(msg)
| 994 | |||
| 995 | |||
def option(*args, **kwargs):
    """Decorator that attaches one option to a Cmdln sub-command handler.

    All arguments are forwarded to add_option() on the handler's
    private SubCmdOptionParser, which is created lazily on first use.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.option("-f", "--force", help="force removal")
            def do_remove(self, subcmd, opts, *args):
                #...
    """
    #XXX Is there a possible optimization for many options to not have a
    #    large stack depth here?
    def add_to_parser(func):
        parser = getattr(func, "optparser", None)
        if parser is None:
            parser = func.optparser = SubCmdOptionParser()
        parser.add_option(*args, **kwargs)
        return func
    return add_to_parser
| 1014 | |||
| 1015 | |||
class Cmdln(RawCmdln):
    """An improved (on cmd.Cmd) framework for building multi-subcommand
    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
    "gdb").

    A simple example:

        import cmdln

        class MySVN(cmdln.Cmdln):
            name = "svn"

            @cmdln.aliases('stat', 'st')
            @cmdln.option('-v', '--verbose', action='store_true',
                          help='print verbose information')
            def do_status(self, subcmd, opts, *paths):
                print "handle 'svn status' command"

            #...

        if __name__ == "__main__":
            shell = MySVN()
            retval = shell.main()
            sys.exit(retval)

    'Cmdln' extends 'RawCmdln' by providing optparse option processing
    integration. See this class' _dispatch_cmd() docstring and
    <http://trentm.com/projects/cmdln> for more information.
    """
    def _dispatch_cmd(self, handler, argv):
        """Introspect sub-command handler signature to determine how to
        dispatch the command. The raw handler provided by the base
        'RawCmdln' class is still supported:

            def do_foo(self, argv):
                # 'argv' is the vector of command line args, argv[0] is
                # the command name itself (i.e. "foo" or an alias)
                pass

        In addition, if the handler has more than 2 arguments option
        processing is automatically done (using optparse):

            @cmdln.option('-v', '--verbose', action='store_true')
            def do_bar(self, subcmd, opts, *args):
                # subcmd = <"bar" or an alias>
                # opts = <an optparse.Values instance>
                if opts.verbose:
                    print "lots of debugging output..."
                # args = <tuple of arguments>
                for arg in args:
                    bar(arg)

        TODO: explain that "*args" can be other signatures as well.

        The `cmdln.option` decorator corresponds to an `add_option()`
        method call on an `optparse.OptionParser` instance.

        You can declare a specific number of arguments:

            @cmdln.option('-v', '--verbose', action='store_true')
            def do_bar2(self, subcmd, opts, bar_one, bar_two):
                #...

        and an appropriate error message will be raised/printed if the
        command is called with a different number of args.
        """
        # Dispatch style is chosen purely from the handler's declared
        # positional-argument count (py2 function introspection).
        co_argcount = handler.im_func.func_code.co_argcount
        if co_argcount == 2:   # handler ::= do_foo(self, argv)
            return handler(argv)
        elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
            try:
                optparser = handler.optparser
            except AttributeError:
                # Handler declared no @cmdln.option's: give it an empty
                # parser (cached on the underlying function object).
                optparser = handler.im_func.optparser = SubCmdOptionParser()
            assert isinstance(optparser, SubCmdOptionParser)
            optparser.set_cmdln_info(self, argv[0])
            try:
                opts, args = optparser.parse_args(argv[1:])
            except StopOptionProcessing:
                #TODO: this doesn't really fly for a replacement of
                #      optparse.py behaviour, does it?
                return 0 # Normal command termination

            try:
                return handler(argv[0], opts, *args)
            except TypeError, ex:
                # Some TypeError's are user errors:
                #   do_foo() takes at least 4 arguments (3 given)
                #   do_foo() takes at most 5 arguments (6 given)
                #   do_foo() takes exactly 5 arguments (6 given)
                # Raise CmdlnUserError for these with a suitably
                # massaged error message.
                import sys
                tb = sys.exc_info()[2] # the traceback object
                if tb.tb_next is not None:
                    # If the traceback is more than one level deep, then the
                    # TypeError did *not* happen on the "handler(...)" call
                    # above. In that case we don't want to handle it specially
                    # here: it would falsely mask deeper code errors.
                    raise
                msg = ex.args[0]
                match = _INCORRECT_NUM_ARGS_RE.search(msg)
                if match:
                    # Subtract the 3 implicit args (self, subcmd, opts)
                    # from the counts so the message makes sense to the
                    # end user.
                    msg = list(match.groups())
                    msg[1] = int(msg[1]) - 3
                    if msg[1] == 1:
                        msg[2] = msg[2].replace("arguments", "argument")
                    msg[3] = int(msg[3]) - 3
                    msg = ''.join(map(str, msg))
                    raise CmdlnUserError(msg)
                else:
                    raise
        else:
            raise CmdlnError("incorrect argcount for %s(): takes %d, must "
                             "take 2 for 'argv' signature or 3+ for 'opts' "
                             "signature" % (handler.__name__, co_argcount))
| 1132 | |||
| 1133 | |||
| 1134 | |||
| 1135 | #---- internal support functions | ||
| 1136 | |||
def _format_linedata(linedata, indent, indent_width):
    """Lay out (name, doc) pairs as aligned two-column help lines.

    "linedata" is a list of 2-tuples of the form:
        (<item-display-string>, <item-docstring>)
    "indent" is a string prefixed to every generated line
    "indent_width" is the column width that `indent` occupies when printed

    Returns a list of formatted lines (trailing whitespace stripped).
    A name longer than the name column is emitted on its own line with
    its doc summary pushed to the following line.
    """
    SPACING = 2
    total_width = 78 - indent_width
    # Name column: at least 13 columns; a fixed 16 once the longest
    # name reaches that lower bound.
    longest = max([len(name) for name, _doc in linedata])
    name_width = 13 if longest < 13 else 16
    doc_width = total_width - name_width - SPACING

    formatted = []
    for name, doc in linedata:
        line = indent + name
        if len(name) <= name_width:
            # Name fits: pad out to the doc column.
            line += ' ' * (name_width + SPACING - len(name))
        else:
            # Name too long: emit it alone, continue on a fresh line.
            formatted.append(line)
            line = indent + ' ' * (name_width + SPACING)
        line += _summarize_doc(doc, doc_width)
        formatted.append(line.rstrip())
    return formatted
| 1170 | |||
def _summarize_doc(doc, length=60):
    r"""Parse out a short one line summary from the given doclines.

    "doc" is the doc string to summarize.
    "length" is the max length for the summary

    Returns "" for None or all-whitespace input.  The summary is the
    first paragraph's lines joined with spaces, truncated with "..."
    if it exceeds `length`.

    >>> _summarize_doc("this function does this")
    'this function does this'
    >>> _summarize_doc("this function does this", 10)
    'this fu...'
    >>> _summarize_doc("this function does this\nand that")
    'this function does this and that'
    >>> _summarize_doc("this function does this\n\nand that")
    'this function does this'
    """
    # Note: the original had an unused "import re" here; removed.
    if doc is None:
        return ""
    assert length > 3, "length <= 3 is absurdly short for a doc summary"
    doclines = doc.strip().splitlines(0)
    if not doclines:
        return ""

    summlines = []
    for line in doclines:
        stripped = line.strip()
        if not stripped:
            # Blank line ends the first paragraph.
            break
        summlines.append(stripped)
        if len(''.join(summlines)) >= length:
            # Already have enough material; stop early.
            break

    summary = ' '.join(summlines)
    if len(summary) > length:
        summary = summary[:length-3] + "..."
    return summary
| 1207 | |||
| 1208 | |||
def line2argv(line):
    r"""Split a command line string into an argument vector.

    Handles single/double quoting and backslash escaping.  Backslash
    behaviour is platform specific: on Windows (and inside quotes on
    any platform) a backslash that does not escape a quote character
    is kept literally.

    >>> line2argv("foo bar")
    ['foo', 'bar']
    >>> line2argv("'foo bar' spam")
    ['foo bar', 'spam']
    >>> line2argv('"foo bar"')
    ['foo bar']
    >>> line2argv(r'"foo\"bar"')
    ['foo"bar']

    On non-Windows platforms an unterminated quoted segment raises
    ValueError.
    """
    import string
    text = line.strip()
    argv = []
    mode = "default"      # or "single-quoted" / "double-quoted"
    token = None          # current argument being accumulated (None = none yet)
    pos = 0
    end = len(text)
    while pos < end:
        ch = text[pos]

        if ch == "\\" and pos + 1 < end:
            # Escaped char: always added to the current token.
            if token is None:
                token = ""
            # Keep the backslash itself on Windows, or inside quotes,
            # unless it is escaping a quote character.
            if (sys.platform == "win32"
                or mode in ("double-quoted", "single-quoted")
               ) and text[pos+1] not in tuple('"\''):
                token += ch
            pos += 1
            token += text[pos]
            pos += 1
            continue

        if mode == "single-quoted":
            if ch == "'":
                mode = "default"
            else:
                token += ch
        elif mode == "double-quoted":
            if ch == '"':
                mode = "default"
            else:
                token += ch
        else:  # mode == "default"
            if ch == '"':
                if token is None:
                    token = ""
                mode = "double-quoted"
            elif ch == "'":
                if token is None:
                    token = ""
                mode = "single-quoted"
            elif ch in string.whitespace:
                if token is not None:
                    argv.append(token)
                token = None
            else:
                if token is None:
                    token = ""
                token += ch
        pos += 1

    if token is not None:
        argv.append(token)
    if sys.platform != "win32" and mode != "default":
        raise ValueError("command line is not terminated: unfinished %s "
                         "segment" % mode)
    return argv
| 1328 | |||
| 1329 | |||
def argv2line(argv):
    r"""Join an argument vector back into a single command line string.

    Arguments containing whitespace are quoted: double quotes are
    preferred, then single quotes (when the argument already contains
    a double quote), then double quotes with embedded ones escaped.

    >>> argv2line(['foo'])
    'foo'
    >>> argv2line(['foo', 'bar baz'])
    'foo "bar baz"'
    """
    pieces = []
    for token in argv:
        if ' ' in token:
            if '"' not in token:
                token = '"' + token + '"'
            elif "'" not in token:
                token = "'" + token + "'"
            else:
                token = '"' + token.replace('"', r'\"') + '"'
        pieces.append(token)
    return ' '.join(pieces)
| 1362 | |||
| 1363 | |||
# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

    "lines" is a list of lines to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.
    """
    DEBUG = False
    if DEBUG:
        print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
              % (tabsize, skip_first_line)
    indents = []
    margin = None
    # First pass: compute the common margin -- the smallest indent
    # (in columns, with tabs expanded at `tabsize`) over all lines
    # that contain something besides whitespace.
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line: continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue # skip all-whitespace lines
            else:
                break
        else:
            # Loop ran off the end without `break`: whitespace-only line,
            # which does not participate in the margin computation.
            continue # skip all-whitespace lines
        if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG: print "dedent: margin=%r" % margin

    # Second pass: strip `margin` columns of leading whitespace from
    # each line, in place.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line: continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    # Whitespace-only line: strip everything up to EOL.
                    if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print "dedent: %r: %r -> removed %d/%d"\
                          % (line, ch, removed, margin)
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    # A tab straddled the margin boundary: replace the
                    # excess columns with spaces to keep alignment.
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
    return lines
| 1431 | |||
def _dedent(text, tabsize=8, skip_first_line=False):
    """Remove the common leading whitespace from `text`.

    Like textwrap.dedent(), except tabs are measured at `tabsize`
    columns instead of being expanded to spaces.  When
    `skip_first_line` is true the first line neither contributes to
    nor receives the dedent (handy for docstrings).
    """
    as_lines = text.splitlines(True)
    _dedentlines(as_lines, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(as_lines)
| 1446 | |||
| 1447 | |||
def _get_indent(marker, s, tab_width=8):
    """Return (indent_string, indent_width) for the run of spaces/tabs
    immediately preceding `marker` in `s`.

    `indent_width` counts columns with tab stops every `tab_width`.
    Raises ValueError if `marker` does not occur in `s`.
    """
    end = s.index(marker)
    begin = end
    # Walk backwards over the whitespace run before the marker.
    while begin > 0 and s[begin-1] in (' ', '\t'):
        begin -= 1
    indent = s[begin:end]
    width = 0
    for ch in indent:
        if ch == '\t':
            width += tab_width - (width % tab_width)
        else:  # ch == ' ' (indent only ever contains spaces and tabs)
            width += 1
    return indent, width
| 1467 | |||
def _get_trailing_whitespace(marker, s):
    """Return the whitespace content trailing the given 'marker' in
    string 's', up to and including one newline ("\\n", "\\r" or "\\r\\n").
    """
    i = s.index(marker) + len(marker)
    ws = []
    while i < len(s):
        ch = s[i]
        if ch in ' \t':
            ws.append(ch)
            i += 1
            continue
        if ch in '\r\n':
            ws.append(ch)
            # Treat "\r\n" as a single line ending.
            if ch == '\r' and i + 1 < len(s) and s[i+1] == '\n':
                ws.append('\n')
        # Newline (or any other character) terminates the scan.
        break
    return ''.join(ws)
| 1487 | |||
| 1488 | |||
| 1489 | |||
| 1490 | #---- bash completion support | ||
| 1491 | # Note: This is still experimental. I expect to change this | ||
| 1492 | # significantly. | ||
| 1493 | # | ||
| 1494 | # To get Bash completion for a cmdln.Cmdln class, run the following | ||
| 1495 | # bash command: | ||
| 1496 | # $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname | ||
| 1497 | # For example: | ||
| 1498 | # $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn | ||
| 1499 | # | ||
| 1500 | #TODO: Simplify the above so don't have to given path to script (try to | ||
| 1501 | # find it on PATH, if possible). Could also make class name | ||
| 1502 | # optional if there is only one in the module (common case). | ||
| 1503 | |||
if __name__ == "__main__" and len(sys.argv) == 6:
    # Bash-completion helper mode: bash's `complete -C ...` invokes this
    # module with exactly 5 arguments (script path, class name, command
    # name, current token, preceding token) and expects completions on
    # stdout, one per line.
    def _log(s):
        # Debug logging helper; the early return makes it a no-op.
        return # no-op, comment out for debugging
        from os.path import expanduser
        fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
        fout.write(str(s) + '\n')
        fout.close()

    # Recipe: module_from_path (1.0.1+)
    def _module_from_path(path):
        """Import and return the module at the given file path."""
        import imp, os, sys
        path = os.path.expanduser(path)
        dir = os.path.dirname(path) or os.curdir
        name = os.path.splitext(os.path.basename(path))[0]
        sys.path.insert(0, dir)
        try:
            iinfo = imp.find_module(name, [dir])
            return imp.load_module(name, *iinfo)
        finally:
            sys.path.remove(dir)

    def _get_bash_cplns(script_path, class_name, cmd_name,
                        token, preceding_token):
        """Return the list of bash completions for the current token.

        Reads COMP_LINE/COMP_POINT from the environment (set by bash)
        to decide whether to complete sub-command names or options of
        an already-typed sub-command.
        """
        _log('--')
        _log('get_cplns(%r, %r, %r, %r, %r)'
             % (script_path, class_name, cmd_name, token, preceding_token))
        comp_line = os.environ["COMP_LINE"]
        comp_point = int(os.environ["COMP_POINT"])
        _log("COMP_LINE: %r" % comp_line)
        _log("COMP_POINT: %r" % comp_point)

        try:
            script = _module_from_path(script_path)
        except ImportError, ex:
            _log("error importing `%s': %s" % (script_path, ex))
            return []
        shell = getattr(script, class_name)()
        cmd_map = shell._get_canonical_map()
        del cmd_map["EOF"]

        # Determine if completing the sub-command name.
        parts = comp_line[:comp_point].split(None, 1)
        _log(parts)
        if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
            #TODO: if parts[1].startswith('-'): handle top-level opts
            _log("complete sub-command names")
            matches = {}
            for name, canon_name in cmd_map.items():
                if name.startswith(token):
                    matches[name] = canon_name
            if not matches:
                return []
            elif len(matches) == 1:
                return matches.keys()
            elif len(set(matches.values())) == 1:
                # All matching aliases map to the same command: offer
                # just the canonical name.
                return [matches.values()[0]]
            else:
                return matches.keys()

        # Otherwise, complete options for the given sub-command.
        #TODO: refine this so it does the right thing with option args
        if token.startswith('-'):
            cmd_name = comp_line.split(None, 2)[1]
            try:
                cmd_canon_name = cmd_map[cmd_name]
            except KeyError:
                return []
            handler = shell._get_cmd_handler(cmd_canon_name)
            optparser = getattr(handler, "optparser", None)
            if optparser is None:
                optparser = SubCmdOptionParser()
            opt_strs = []
            for option in optparser.option_list:
                for opt_str in option._short_opts + option._long_opts:
                    if opt_str.startswith(token):
                        opt_strs.append(opt_str)
            return opt_strs

        return []

    for cpln in _get_bash_cplns(*sys.argv[1:]):
        print cpln
| 1586 | |||
diff --git a/scripts/lib/mic/utils/errors.py b/scripts/lib/mic/utils/errors.py new file mode 100644 index 0000000000..8d720f9080 --- /dev/null +++ b/scripts/lib/mic/utils/errors.py | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2007 Red Hat, Inc. | ||
| 4 | # Copyright (c) 2011 Intel, Inc. | ||
| 5 | # | ||
| 6 | # This program is free software; you can redistribute it and/or modify it | ||
| 7 | # under the terms of the GNU General Public License as published by the Free | ||
| 8 | # Software Foundation; version 2 of the License | ||
| 9 | # | ||
| 10 | # This program is distributed in the hope that it will be useful, but | ||
| 11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 13 | # for more details. | ||
| 14 | # | ||
| 15 | # You should have received a copy of the GNU General Public License along | ||
| 16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 18 | |||
class CreatorError(Exception):
    """An exception base class for all imgcreate errors."""
    keyword = '<creator>'  # prefix prepended to the message by __str__

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        # NOTE(review): __str__ mutates self.msg in place (caches the
        # utf-8-encoded / stringified form), so later reads of .msg see
        # the converted value rather than the original object.
        if isinstance(self.msg, unicode):  # python2-only 'unicode' type
            self.msg = self.msg.encode('utf-8', 'ignore')
        else:
            self.msg = str(self.msg)
        return self.keyword + self.msg
| 32 | |||
class Usage(CreatorError):
    """Command line usage error; str() appends a pointer to --help."""
    keyword = '<usage>'

    def __str__(self):
        # Same encode-and-cache behaviour as CreatorError.__str__.
        if isinstance(self.msg, unicode):
            self.msg = self.msg.encode('utf-8', 'ignore')
        else:
            self.msg = str(self.msg)
        return self.keyword + self.msg + ', please use "--help" for more info'
| 42 | |||
class Abort(CreatorError):
    """CreatorError rendered with no keyword prefix."""
    keyword = ''

class ConfigError(CreatorError):
    """CreatorError tagged '<config>'."""
    keyword = '<config>'

class KsError(CreatorError):
    """CreatorError tagged '<kickstart>'."""
    keyword = '<kickstart>'

class RepoError(CreatorError):
    """CreatorError tagged '<repo>'."""
    keyword = '<repo>'

class RpmError(CreatorError):
    """CreatorError tagged '<rpm>'."""
    keyword = '<rpm>'

class MountError(CreatorError):
    """CreatorError tagged '<mount>'."""
    keyword = '<mount>'

class SnapshotError(CreatorError):
    """CreatorError tagged '<snapshot>'."""
    keyword = '<snapshot>'

class SquashfsError(CreatorError):
    """CreatorError tagged '<squashfs>'."""
    keyword = '<squashfs>'

class BootstrapError(CreatorError):
    """CreatorError tagged '<bootstrap>'."""
    keyword = '<bootstrap>'

# NOTE(review): this shadows the builtin RuntimeError within this
# module; renaming would break existing callers, so it is left as-is.
class RuntimeError(CreatorError):
    """CreatorError tagged '<runtime>'."""
    keyword = '<runtime>'
diff --git a/scripts/lib/mic/utils/fs_related.py b/scripts/lib/mic/utils/fs_related.py new file mode 100644 index 0000000000..b9b9a97175 --- /dev/null +++ b/scripts/lib/mic/utils/fs_related.py | |||
| @@ -0,0 +1,1029 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2007, Red Hat, Inc. | ||
| 4 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
| 5 | # | ||
| 6 | # This program is free software; you can redistribute it and/or modify it | ||
| 7 | # under the terms of the GNU General Public License as published by the Free | ||
| 8 | # Software Foundation; version 2 of the License | ||
| 9 | # | ||
| 10 | # This program is distributed in the hope that it will be useful, but | ||
| 11 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 12 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 13 | # for more details. | ||
| 14 | # | ||
| 15 | # You should have received a copy of the GNU General Public License along | ||
| 16 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 17 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 18 | |||
| 19 | from __future__ import with_statement | ||
| 20 | import os | ||
| 21 | import sys | ||
| 22 | import errno | ||
| 23 | import stat | ||
| 24 | import random | ||
| 25 | import string | ||
| 26 | import time | ||
| 27 | import uuid | ||
| 28 | |||
| 29 | from mic import msger | ||
| 30 | from mic.utils import runner | ||
| 31 | from mic.utils.errors import * | ||
| 32 | |||
| 33 | |||
def find_binary_inchroot(binary, chroot):
    """Search the standard bin directories inside `chroot` for `binary`.

    Returns the chroot-relative path (e.g. "/usr/bin/foo") of the first
    match, or None when the binary is not present in the chroot.
    """
    search_dirs = ("/usr/sbin", "/usr/bin", "/sbin", "/bin")
    for directory in search_dirs:
        candidate = "%s/%s" % (directory, binary)
        if os.path.exists("%s/%s" % (chroot, candidate)):
            return candidate
    return None
| 46 | |||
def find_binary_path(binary):
    """Locate `binary` on the host and return its absolute path.

    Searches $PATH first (plus $HOME/bin when $HOME is set), then a
    fixed list of standard system directories.

    Raises CreatorError when the binary cannot be found.
    """
    # os.environ.has_key() was removed in python3; the "in" operator
    # works on every python version and is the idiomatic spelling.
    if "PATH" in os.environ:
        paths = os.environ["PATH"].split(":")
    else:
        paths = []
    if "HOME" in os.environ:
        paths += [os.environ["HOME"] + "/bin"]
    paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]

    for path in paths:
        bin_path = "%s/%s" % (path, binary)
        if os.path.exists(bin_path):
            return bin_path
    raise CreatorError("Command '%s' is not available." % binary)
| 61 | |||
def makedirs(dirname):
    """A version of os.makedirs() that doesn't throw an
    exception if the leaf directory already exists.
    """
    try:
        os.makedirs(dirname)
    except OSError, err:
        # Only swallow "already exists"; propagate real failures
        # (permission denied, read-only filesystem, ...).
        if err.errno != errno.EEXIST:
            raise
| 71 | |||
def mksquashfs(in_img, out_img):
    """Pack `in_img` into a squashfs image at `out_img`.

    Raises SquashfsError when the mksquashfs tool exits non-zero.
    """
    cmd = [find_binary_path("mksquashfs"), in_img, out_img]
    # The progress bar is only useful on an interactive terminal.
    if not sys.stdout.isatty():
        cmd.append("-no-progress")

    ret = runner.show(cmd)
    if ret != 0:
        raise SquashfsError("'%s' exited with error (%d)" % (' '.join(cmd), ret))
| 82 | |||
def resize2fs(fs, size):
    """Resize the ext filesystem image 'fs' to 'size' bytes.

    A size of 0 shrinks the filesystem to its minimal size.
    Returns the exit status of the resize2fs command.
    """
    # renamed local so it no longer shadows this function's own name
    resize2fs_cmd = find_binary_path("resize2fs")
    if size == 0:
        # it means to minimalize it
        return runner.show([resize2fs_cmd, '-M', fs])
    return runner.show([resize2fs_cmd, fs, "%sK" % (size / 1024,)])
| 90 | |||
def my_fuser(fp):
    """Return True if 'fp' is currently held open by a /bin/bash process."""
    fuser = find_binary_path("fuser")
    if not os.path.exists(fp):
        return False

    if runner.quiet([fuser, "-s", fp]) == 0:
        for pid in runner.outs([fuser, fp]).split():
            with open("/proc/%s/cmdline" % pid, "r") as cmdline_file:
                cmdline = cmdline_file.read()
            # /proc cmdline entries are NUL-separated; drop the trailing NUL
            if cmdline[:-1] == "/bin/bash":
                return True

    # not found
    return False
| 107 | |||
class BindChrootMount:
    """Represents a bind mount of a directory into a chroot."""
    def __init__(self, src, chroot, dest = None, option = None):
        self.root = os.path.abspath(os.path.expanduser(chroot))
        self.option = option

        # Resolve a symlinked source to its target, but remember the
        # original path so mount() can recreate the link in the chroot.
        self.orig_src = self.src = src
        if os.path.islink(src):
            self.src = os.readlink(src)
            if not self.src.startswith('/'):
                self.src = os.path.abspath(os.path.join(os.path.dirname(src),
                                                        self.src))

        if not dest:
            dest = self.src
        self.dest = os.path.join(self.root, dest.lstrip('/'))

        self.mounted = False
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def ismounted(self):
        """Check /proc/mounts to see whether self.dest is a mount point."""
        target = os.path.abspath(self.dest)
        with open('/proc/mounts') as mounts:
            for entry in mounts:
                if entry.split()[1] == target:
                    return True
        return False

    def has_chroot_instance(self):
        """True if some bash process still holds the chroot lock file."""
        lockfile = os.path.join(self.root, ".chroot.lock")
        return my_fuser(lockfile)

    def mount(self):
        """Bind-mount src onto dest (idempotent); apply remount options."""
        if self.mounted or self.ismounted():
            return

        makedirs(self.dest)
        if runner.show([self.mountcmd, "--bind", self.src, self.dest]) != 0:
            raise MountError("Bind-mounting '%s' to '%s' failed" %
                             (self.src, self.dest))
        if self.option:
            # e.g. "ro": a bind mount only honors options via a remount
            remount_args = [self.mountcmd, "--bind", "-o",
                            "remount,%s" % self.option, self.dest]
            if runner.show(remount_args) != 0:
                raise MountError("Bind-remounting '%s' failed" % self.dest)

        self.mounted = True
        if os.path.islink(self.orig_src):
            link_in_root = os.path.join(self.root, self.orig_src.lstrip('/'))
            if not os.path.exists(link_in_root):
                os.symlink(self.src, link_in_root)

    def unmount(self):
        """Lazily unmount dest unless a chroot session is still active."""
        if self.has_chroot_instance():
            return

        if self.ismounted():
            runner.show([self.umountcmd, "-l", self.dest])
        self.mounted = False
| 168 | |||
class LoopbackMount:
    """LoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, fstype = None):
        self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
        # BUGFIX: loopsetup() reads self.lofile, but the attribute was
        # never stored here, causing an AttributeError when called.
        self.lofile = lofile
        self.losetup = False
        self.losetupcmd = find_binary_path("losetup")

    def cleanup(self):
        """Unmount and release the underlying disk."""
        self.diskmount.cleanup()

    def unmount(self):
        self.diskmount.unmount()

    def lounsetup(self):
        """Detach the loop device previously attached by loopsetup()."""
        if self.losetup:
            runner.show([self.losetupcmd, "-d", self.loopdev])
            self.losetup = False
            self.loopdev = None

    def loopsetup(self):
        """Attach the backing file to a free loop device (idempotent)."""
        if self.losetup:
            return

        self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
        self.losetup = True

    def mount(self):
        self.diskmount.mount()
| 197 | |||
class SparseLoopbackMount(LoopbackMount):
    """SparseLoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, size, fstype = None):
        self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)
        # BUGFIX: the inherited loopsetup()/lounsetup() methods rely on
        # these attributes, but this override never initialized them
        # (and did not call the parent constructor).
        self.lofile = lofile
        self.losetup = False
        self.losetupcmd = find_binary_path("losetup")

    def expand(self, create = False, size = None):
        """Grow the sparse backing file (see SparseLoopbackDisk.expand)."""
        self.diskmount.disk.expand(create, size)

    def truncate(self, size = None):
        """Truncate the sparse backing file to 'size' bytes."""
        self.diskmount.disk.truncate(size)

    def create(self):
        self.diskmount.disk.create()
| 211 | |||
class SparseExtLoopbackMount(SparseLoopbackMount):
    """SparseExtLoopbackMount compatibility layer for old API"""
    def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
        self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)

    def __format_filesystem(self):
        # BUGFIX: inside this class, 'self.diskmount.__format_filesystem'
        # is name-mangled to '_SparseExtLoopbackMount__format_filesystem'
        # and never resolved (AttributeError).  Delegate to ExtDiskMount's
        # mangled private name explicitly.
        self.diskmount._ExtDiskMount__format_filesystem()

    def create(self):
        self.diskmount.disk.create()

    def resize(self, size = None):
        # BUGFIX: same name-mangling problem as __format_filesystem above.
        return self.diskmount._ExtDiskMount__resize_filesystem(size)

    def mount(self):
        self.diskmount.mount()

    def __fsck(self):
        # BUGFIX: referenced the never-assigned 'self.extdiskmount' and a
        # mangled private name; both raised AttributeError.
        self.diskmount._ExtDiskMount__fsck()

    def __get_size_from_filesystem(self):
        # BUGFIX: same name-mangling problem as above.
        return self.diskmount._ExtDiskMount__get_size_from_filesystem()

    def __resize_to_minimal(self):
        # BUGFIX: same name-mangling problem as above.
        return self.diskmount._ExtDiskMount__resize_to_minimal()

    def resparse(self, size = None):
        return self.diskmount.resparse(size)
| 241 | |||
class Disk:
    """Generic base object for a disk

    The 'create' method must make the disk visible as a block device - eg
    by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
    method must undo the 'create' operation.
    """
    def __init__(self, size, device = None):
        self._size = size
        self._device = device

    def create(self):
        """Make the disk visible as a block device; no-op in the base class."""
        pass

    def cleanup(self):
        """Undo whatever create() did; no-op in the base class."""
        pass

    def get_device(self):
        return self._device

    def set_device(self, path):
        self._device = path

    # block device path (settable once the disk has been attached)
    device = property(get_device, set_device)

    def get_size(self):
        return self._size

    # disk size in bytes (read-only)
    size = property(get_size)
| 268 | |||
| 269 | |||
class RawDisk(Disk):
    """A Disk backed by a block device.
    Note that create() is a no-op.
    """
    def __init__(self, size, device):
        Disk.__init__(self, size, device)

    def fixed(self):
        # A real block device cannot be grown or shrunk.
        return True

    def exists(self):
        # The underlying block device is assumed to already be present.
        return True
| 282 | |||
class LoopbackDisk(Disk):
    """A Disk backed by a file via the loop module."""
    def __init__(self, lofile, size):
        Disk.__init__(self, size)
        self.lofile = lofile
        self.losetupcmd = find_binary_path("losetup")

    def fixed(self):
        """The backing file can be resized, so the disk is not fixed."""
        return False

    def exists(self):
        """True when the backing file is present on disk."""
        return os.path.exists(self.lofile)

    def create(self):
        """Attach the backing file to a free loop device (idempotent)."""
        if self.device is None:
            self.device = get_loop_device(self.losetupcmd, self.lofile)

    def cleanup(self):
        """Detach the loop device, if one is attached (best effort)."""
        if self.device is None:
            return
        msger.debug("Losetup remove %s" % self.device)
        runner.show([self.losetupcmd, "-d", self.device])
        self.device = None
| 308 | |||
class SparseLoopbackDisk(LoopbackDisk):
    """A Disk backed by a sparse file via the loop module."""
    def __init__(self, lofile, size):
        LoopbackDisk.__init__(self, lofile, size)

    def expand(self, create = False, size = None):
        """Grow the sparse backing file to 'size' bytes (default self.size).

        With create=True the file and its parent directories are created
        if missing.
        """
        flags = os.O_WRONLY
        if create:
            flags |= os.O_CREAT
            if not os.path.exists(self.lofile):
                makedirs(os.path.dirname(self.lofile))

        if size is None:
            size = self.size

        msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
        if create:
            # 0o644 replaces the Python2-only octal literal 0644
            fd = os.open(self.lofile, flags, 0o644)
        else:
            fd = os.open(self.lofile, flags)

        if size <= 0:
            size = 1
        try:
            os.ftruncate(fd, size)
        except:
            # may be limited by 2G in 32bit env
            # (plain 2**31 replaces the Python2-only long literal 2**31L)
            os.ftruncate(fd, 2**31)

        os.close(fd)

    def truncate(self, size = None):
        """Truncate the backing file to 'size' bytes (default self.size)."""
        if size is None:
            size = self.size

        msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
        fd = os.open(self.lofile, os.O_WRONLY)
        os.ftruncate(fd, size)
        os.close(fd)

    def create(self):
        """Create/extend the sparse file, then attach it to a loop device."""
        self.expand(create = True)
        LoopbackDisk.create(self)
| 352 | |||
class Mount:
    """A generic base class to deal with mounting things."""
    def __init__(self, mountdir):
        self.mountdir = mountdir

    def cleanup(self):
        """Default cleanup simply unmounts."""
        self.unmount()

    def mount(self, options = None):
        """Mount; no-op in the base class."""
        pass

    def unmount(self):
        """Unmount; no-op in the base class."""
        pass
| 366 | |||
class DiskMount(Mount):
    """A Mount object that handles mounting of a Disk."""
    def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
        Mount.__init__(self, mountdir)

        self.disk = disk
        self.fstype = fstype
        self.rmmountdir = rmmountdir

        self.mounted = False
        # rmdir is only set once mount() actually created the directory
        self.rmdir = False
        if fstype:
            self.mkfscmd = find_binary_path("mkfs." + self.fstype)
        else:
            self.mkfscmd = None
        self.mountcmd = find_binary_path("mount")
        self.umountcmd = find_binary_path("umount")

    def cleanup(self):
        """Unmount and release the underlying disk."""
        Mount.cleanup(self)
        self.disk.cleanup()

    def unmount(self):
        """Lazily unmount the directory; remove it if mount() created it.

        Raises MountError if umount fails.
        """
        if self.mounted:
            msger.debug("Unmounting directory %s" % self.mountdir)
            runner.quiet('sync') # sync the data on this mount point
            rc = runner.show([self.umountcmd, "-l", self.mountdir])
            if rc == 0:
                self.mounted = False
            else:
                raise MountError("Failed to umount %s" % self.mountdir)
        if self.rmdir and not self.mounted:
            try:
                os.rmdir(self.mountdir)
            except OSError:
                # Best effort only; 'as' binding removed since the error
                # object was unused (old form was Python2-only syntax).
                pass
            self.rmdir = False


    def __create(self):
        # Make the disk visible as a block device.
        self.disk.create()


    def mount(self, options = None):
        """Create the mount point if needed, attach the disk and mount it.

        Raises MountError when the mount command fails.
        """
        if self.mounted:
            return

        if not os.path.isdir(self.mountdir):
            msger.debug("Creating mount point %s" % self.mountdir)
            os.makedirs(self.mountdir)
            self.rmdir = self.rmmountdir

        self.__create()

        msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
        if options:
            args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
        else:
            args = [ self.mountcmd, self.disk.device, self.mountdir ]
        if self.fstype:
            args.extend(["-t", self.fstype])

        rc = runner.show(args)
        if rc != 0:
            raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
                             (self.disk.device, self.mountdir, " ".join(args), rc))

        self.mounted = True
| 435 | |||
class ExtDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize ext[23] filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is not allowed in ext labels
        self.fslabel = fslabel.replace("/", "")
        self.uuid = str(uuid.uuid4())
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.extopts = None
        self.dumpe2fs = find_binary_path("dumpe2fs")
        self.tune2fs = find_binary_path("tune2fs")

    def __parse_field(self, output, field):
        # Extract the value of a "field:   value" line from dumpe2fs output.
        for line in output.split("\n"):
            if line.startswith(field + ":"):
                return line[len(field) + 1:].strip()

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        """mkfs the device, then disable periodic fsck via tune2fs."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
                   str(self.blocksize), "-U", self.uuid]
        if self.extopts:
            cmdlist.extend(self.extopts.split())
        cmdlist.extend([self.disk.device])

        rc, errout = runner.runtool(cmdlist, catch=2)
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s:\n%s" %
                             (self.fstype, self.disk.device, errout))

        if not self.extopts:
            msger.debug("Tuning filesystem on %s" % self.disk.device)
            runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])

    def __resize_filesystem(self, size = None):
        """Resize the backing file and its filesystem to 'size' bytes."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: expand()'s signature is (create, size); passing size
            # positionally handed it to 'create' and expanded the file to
            # its default size instead of the requested one.
            self.disk.expand(size = size)

        self.__fsck()

        resize2fs(self.disk.lofile, size)
        return size

    def __create(self):
        # An existing, resizable backing file is resized; otherwise the
        # freshly created one is formatted.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.info("Checking filesystem %s" % self.disk.lofile)
        runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        # filesystem size in bytes = block count * block size
        return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
                                      "Block count")) * self.blocksize

    def __resize_to_minimal(self):
        self.__fsck()

        #
        # Use a binary search to find the minimal size
        # we can resize the image to
        #
        bot = 0
        top = self.__get_size_from_filesystem()
        while top != (bot + 1):
            t = bot + ((top - bot) / 2)

            if not resize2fs(self.disk.lofile, t):
                top = t
            else:
                bot = t
        return top

    def resparse(self, size = None):
        """Shrink to minimal size, truncate, then grow back to 'size'.

        Returns the minimal size found.
        """
        self.cleanup()
        if size == 0:
            minsize = 0
        else:
            minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)

        self.__resize_filesystem(size)
        return minsize
| 546 | |||
class VfatDiskMount(DiskMount):
    """A DiskMount object that is able to format vfat/msdos filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is not allowed in FAT labels
        self.fslabel = fslabel.replace("/", "")
        # FAT volume IDs are 32-bit; synthesize one from two random 16-bit halves
        rand1 = random.randint(0, 2**16 - 1)
        rand2 = random.randint(0, 2**16 - 1)
        self.uuid = "%04X-%04X" % (rand1, rand2)
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.fsckcmd = find_binary_path("fsck." + self.fstype)

    def __format_filesystem(self):
        """mkfs the device with the generated label and volume ID."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-n", self.fslabel,
                          "-i", self.uuid.replace("-", ""), self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        msger.verbose("Tuning filesystem on %s" % self.disk.device)

    def __resize_filesystem(self, size = None):
        """Grow the backing file to 'size' bytes (FAT itself is not resized)."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: expand()'s signature is (create, size); passing size
            # positionally handed it to 'create' and expanded the file to
            # its default size instead of the requested one.
            self.disk.expand(size = size)

        self.__fsck()

        #resize2fs(self.disk.lofile, size)
        return size

    def __create(self):
        # An existing, resizable backing file is resized; otherwise the
        # freshly created one is formatted.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.show([self.fsckcmd, "-y", self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        # FAT cannot be shrunk here; "minimal" is simply the current size.
        self.__fsck()

        #
        # Use a binary search to find the minimal size
        # we can resize the image to
        #
        bot = 0
        top = self.__get_size_from_filesystem()
        return top

    def resparse(self, size = None):
        """Truncate to the current size and re-expand to 'size'."""
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
| 630 | |||
class BtrfsDiskMount(DiskMount):
    """A DiskMount object that is able to format/resize btrfs filesystems."""
    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
        self.__check_btrfs()
        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
        self.blocksize = blocksize
        # "/" is not allowed in btrfs labels
        self.fslabel = fslabel.replace("/", "")
        # filled in by __format_filesystem() from blkid output
        self.uuid = None
        self.skipformat = skipformat
        self.fsopts = fsopts
        self.blkidcmd = find_binary_path("blkid")
        self.btrfsckcmd = find_binary_path("btrfsck")

    def __check_btrfs(self):
        """Verify the kernel can mount btrfs; also disable SELinux."""
        # Need to load the btrfs module before checking /proc/filesystems
        load_module("btrfs")
        found = False
        # iterate the file directly: xreadlines() is Python2-only and the
        # old code also leaked the file handle
        with open("/proc/filesystems") as fs_list:
            for line in fs_list:
                if line.find("btrfs") > -1:
                    found = True
                    break
        if not found:
            raise MountError("Your system can't mount btrfs filesystem, please make sure your kernel has btrfs support and the module btrfs.ko has been loaded.")

        # disable selinux, selinux will block write
        if os.path.exists("/usr/sbin/setenforce"):
            runner.show(["/usr/sbin/setenforce", "0"])

    def __parse_field(self, output, field):
        # Extract the value of 'field="value"' from blkid-style output.
        for line in output.split(" "):
            if line.startswith(field + "="):
                return line[len(field) + 1:].strip().replace("\"", "")

        raise KeyError("Failed to find field '%s' in output" % field)

    def __format_filesystem(self):
        """mkfs the device and record the generated filesystem UUID."""
        if self.skipformat:
            msger.debug("Skip filesystem format.")
            return

        msger.verbose("Formating %s filesystem on %s" % (self.fstype, self.disk.device))
        rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
        if rc != 0:
            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))

        self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")

    def __resize_filesystem(self, size = None):
        """Grow the backing file to 'size' bytes (btrfs itself not resized)."""
        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]

        if size is None:
            size = self.disk.size

        if size == current_size:
            return

        if size > current_size:
            # BUGFIX: expand()'s signature is (create, size); passing size
            # positionally handed it to 'create' and expanded the file to
            # its default size instead of the requested one.
            self.disk.expand(size = size)

        self.__fsck()
        return size

    def __create(self):
        # An existing, resizable backing file is resized; otherwise the
        # freshly created one is formatted.
        resize = False
        if not self.disk.fixed() and self.disk.exists():
            resize = True

        self.disk.create()

        if resize:
            self.__resize_filesystem()
        else:
            self.__format_filesystem()

    def mount(self, options = None):
        self.__create()
        DiskMount.mount(self, options)

    def __fsck(self):
        msger.debug("Checking filesystem %s" % self.disk.lofile)
        runner.quiet([self.btrfsckcmd, self.disk.lofile])

    def __get_size_from_filesystem(self):
        return self.disk.size

    def __resize_to_minimal(self):
        # btrfs cannot be shrunk here; "minimal" is simply the current size.
        self.__fsck()

        return self.__get_size_from_filesystem()

    def resparse(self, size = None):
        """Truncate to the current size and re-expand to 'size'."""
        self.cleanup()
        minsize = self.__resize_to_minimal()
        self.disk.truncate(minsize)
        self.__resize_filesystem(size)
        return minsize
| 727 | |||
class DeviceMapperSnapshot(object):
    """A device-mapper copy-on-write snapshot over two loopback disks."""
    def __init__(self, imgloop, cowloop):
        self.imgloop = imgloop
        self.cowloop = cowloop

        self.__created = False
        self.__name = None
        self.dmsetupcmd = find_binary_path("dmsetup")

        # Load dm_snapshot if it isn't loaded.  (This was previously a
        # stray string statement posing as a comment.)
        load_module("dm_snapshot")

    def get_path(self):
        """Path of the snapshot device node, or None before create()."""
        if self.__name is None:
            return None
        return os.path.join("/dev/mapper", self.__name)
    path = property(get_path)

    def create(self):
        """Attach both loops and create the dm snapshot device.

        Raises SnapshotError on failure (after detaching the loops).
        """
        if self.__created:
            return

        self.imgloop.create()
        self.cowloop.create()

        self.__name = "imgcreate-%d-%d" % (os.getpid(),
                                           random.randint(0, 2**16))

        size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]

        # dm table: <start> <sectors> snapshot <origin> <cow> p <chunksize>
        table = "0 %d snapshot %s %s p 8" % (size / 512,
                                             self.imgloop.device,
                                             self.cowloop.device)

        args = [self.dmsetupcmd, "create", self.__name, "--table", table]
        if runner.show(args) != 0:
            self.cowloop.cleanup()
            self.imgloop.cleanup()
            raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))

        self.__created = True

    def remove(self, ignore_errors = False):
        """Tear down the snapshot device and detach both loops."""
        if not self.__created:
            return

        # give device-mapper time to settle before removing
        time.sleep(2)
        rc = runner.show([self.dmsetupcmd, "remove", self.__name])
        if not ignore_errors and rc != 0:
            raise SnapshotError("Could not remove snapshot device")

        self.__name = None
        self.__created = False

        self.cowloop.cleanup()
        self.imgloop.cleanup()

    def get_cow_used(self):
        """Return the number of COW bytes in use (0 before create())."""
        if not self.__created:
            return 0

        #
        # dmsetup status on a snapshot returns e.g.
        # "0 8388608 snapshot 416/1048576"
        # or, more generally:
        # "A B snapshot C/D"
        # where C is the number of 512 byte sectors in use
        #
        out = runner.outs([self.dmsetupcmd, "status", self.__name])
        try:
            return int((out.split()[3]).split('/')[0]) * 512
        except (ValueError, IndexError):
            # IndexError added: short/empty output previously escaped as a
            # raw traceback instead of a SnapshotError
            raise SnapshotError("Failed to parse dmsetup status: " + out)
| 801 | |||
def create_image_minimizer(path, image, minimal_size):
    """
    Builds a copy-on-write image which can be used to
    create a device-mapper snapshot of an image where
    the image's filesystem is as small as possible

    The steps taken are:
      1) Create a sparse COW
      2) Loopback mount the image and the COW
      3) Create a device-mapper snapshot of the image
         using the COW
      4) Resize the filesystem to the minimal size
      5) Determine the amount of space used in the COW
      6) Destroy the device-mapper snapshot
      7) Truncate the COW, removing unused space
      8) Create a squashfs of the COW
    """
    imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter

    # 64 MiB sparse COW file.  (Plain ints replace the Python2-only 'L'
    # long literals.)
    cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
                                 64 * 1024 * 1024)

    snapshot = DeviceMapperSnapshot(imgloop, cowloop)

    try:
        snapshot.create()

        resize2fs(snapshot.path, minimal_size)

        cow_used = snapshot.get_cow_used()
    finally:
        # Ignore removal errors only if we are already propagating one.
        snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None))

    cowloop.truncate(cow_used)

    mksquashfs(cowloop.lofile, path)

    os.unlink(cowloop.lofile)
| 840 | |||
def load_module(module):
    """modprobe 'module' unless it already appears in /proc/modules."""
    found = False
    # iterate the file directly: xreadlines() is Python2-only and the old
    # code also leaked the file handle
    with open('/proc/modules') as modules:
        for line in modules:
            if line.startswith("%s " % module):
                found = True
                break
    if not found:
        msger.info("Loading %s..." % module)
        runner.quiet(['modprobe', module])
| 850 | |||
class LoopDevice(object):
    """Manages the lifetime of a /dev/loopN device node."""
    def __init__(self, loopid=None):
        self.device = None
        self.loopid = loopid
        self.created = False
        self.kpartxcmd = find_binary_path("kpartx")
        self.losetupcmd = find_binary_path("losetup")

    def register(self, device):
        """Adopt an existing device node instead of creating one."""
        self.device = device
        self.loopid = None
        self.created = True

    def reg_atexit(self):
        """Arrange for close() to run at interpreter exit."""
        import atexit
        atexit.register(self.close)

    def _genloopid(self):
        """Pick an unused loop minor number in the range [10, 100)."""
        import glob
        if not glob.glob("/dev/loop[0-9]*"):
            return 10

        fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
        maxid = 1 + max(filter(lambda x: x<100,
                               map(fint, glob.glob("/dev/loop[0-9]*"))))
        if maxid < 10: maxid = 10
        if maxid >= 100:
            # BUGFIX: a bare 'raise' with no active exception is itself an
            # error; raise something callers can actually handle.
            raise MountError("Exhausted the pool of loop device ids")
        return maxid

    def _kpseek(self, device):
        """True if kpartx reports partition mappings on 'device'."""
        rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
        if rc != 0:
            raise MountError("Can't query dm snapshot on %s" % device)
        for line in out.splitlines():
            if line and line.startswith("loop"):
                return True
        return False

    def _loseek(self, device):
        """True if losetup reports 'device' as currently attached."""
        import re
        rc, out = runner.runtool([self.losetupcmd, '-a'])
        if rc != 0:
            raise MountError("Failed to run 'losetup -a'")
        for line in out.splitlines():
            m = re.match("([^:]+): .*", line)
            if m and m.group(1) == device:
                return True
        return False

    def create(self):
        """Create the /dev/loopN node (via mknod) if it doesn't exist.

        Raises MountError if the device is busy or mknod fails.
        """
        if not self.created:
            if not self.loopid:
                self.loopid = self._genloopid()
            self.device = "/dev/loop%d" % self.loopid
            if os.path.exists(self.device):
                if self._loseek(self.device):
                    raise MountError("Device busy: %s" % self.device)
                else:
                    self.created = True
                    return

            mknod = find_binary_path('mknod')
            # loop devices are block major 7, minor == loopid
            rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
            if rc != 0:
                raise MountError("Failed to create device %s" % self.device)
            else:
                self.created = True

    def close(self):
        """Cleanup wrapper that logs instead of raising (atexit-safe)."""
        if self.created:
            try:
                self.cleanup()
                self.device = None
            except MountError as e:
                # 'as' replaces the Python2-only 'except MountError, e'
                msger.error("%s" % e)

    def cleanup(self):
        """Tear down kpartx mappings and loop attachment, then remove the
        node if we created it."""
        if self.device is None:
            return


        if self._kpseek(self.device):
            if self.created:
                # NOTE(review): this closes ALL inherited fds >= 3 of the
                # current process so none keep the mappings busy — confirm
                # this aggressive behavior is intended before changing it.
                for i in range(3, os.sysconf("SC_OPEN_MAX")):
                    try:
                        os.close(i)
                    except OSError:
                        pass
            runner.quiet([self.kpartxcmd, "-d", self.device])
            if self._loseek(self.device):
                runner.quiet([self.losetupcmd, "-d", self.device])
            # FIXME: should sleep a while between two loseek
            if self._loseek(self.device):
                msger.warning("Can't cleanup loop device %s" % self.device)
        elif self.loopid:
            os.unlink(self.device)
| 948 | |||
# Directory of per-device pid files: each file is named after a loop device
# and records the pid of the process that set it up (see get_loop_device()).
DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
# Lock file serializing loop-device allocation across mic processes.
DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
| 951 | |||
def get_loop_device(losetupcmd, lofile):
    """Attach 'lofile' to a free loop device and return the device path.

    The whole allocation is serialized with an exclusive flock on
    DEVICE_LOCKFILE so concurrent mic processes do not race for the same
    loop device.  The owning pid is recorded under DEVICE_PIDFILE_DIR so
    stale devices can be reclaimed later by clean_loop_devices().
    Raises CreatorError on failure.
    """
    global DEVICE_PIDFILE_DIR
    global DEVICE_LOCKFILE

    import fcntl
    makedirs(os.path.dirname(DEVICE_LOCKFILE))
    fp = open(DEVICE_LOCKFILE, 'w')
    fcntl.flock(fp, fcntl.LOCK_EX)
    try:
        loopdev = None
        devinst = LoopDevice()

        # Reclaim devices left behind by dead processes first.
        clean_loop_devices()

        # Provide an available loop device: prefer one found by
        # 'losetup --find', otherwise create a fresh node.
        rc, out = runner.runtool([losetupcmd, "--find"])
        if rc == 0:
            loopdev = out.split()[0]
            devinst.register(loopdev)
        if not loopdev or not os.path.exists(loopdev):
            devinst.create()
            loopdev = devinst.device

        # Attach the image file to the chosen loop device.
        rc = runner.show([losetupcmd, loopdev, lofile])
        if rc != 0:
            raise MountError("Failed to setup loop device for '%s'" % lofile)

        devinst.reg_atexit()

        # Record this process as the owner of the device.
        makedirs(DEVICE_PIDFILE_DIR)
        pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
        if os.path.exists(pidfile):
            os.unlink(pidfile)
        with open(pidfile, 'w') as wf:
            wf.write(str(os.getpid()))

    except MountError, err:
        raise CreatorError("%s" % str(err))
    except:
        raise
    finally:
        # Always drop the allocation lock, even on failure.
        try:
            fcntl.flock(fp, fcntl.LOCK_UN)
            fp.close()
            os.unlink(DEVICE_LOCKFILE)
        except:
            pass

    return loopdev
| 1004 | |||
def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
    """Release loop devices whose owning process has exited.

    Every file in 'piddir' is named after a loop device and stores the pid
    of the process that attached it.  Devices with a missing or dead owner
    are detached (best effort); devices still in use are left alone.
    """
    if not os.path.isdir(piddir):
        return

    for devname in os.listdir(piddir):
        pidfile = os.path.join(piddir, devname)
        try:
            with open(pidfile, 'r') as rf:
                owner = int(rf.read())
        except:
            owner = None

        # Skip devices whose recorded owner is still alive.
        if not owner or os.path.exists(os.path.join('/proc', str(owner))):
            continue

        # Best effort: detach the stale device and drop its pid file.
        try:
            stale = LoopDevice()
            stale.register(os.path.join('/dev', devname))
            stale.cleanup()
            os.unlink(pidfile)
        except:
            pass
| 1029 | |||
diff --git a/scripts/lib/mic/utils/gpt_parser.py b/scripts/lib/mic/utils/gpt_parser.py new file mode 100644 index 0000000000..5d43b70778 --- /dev/null +++ b/scripts/lib/mic/utils/gpt_parser.py | |||
| @@ -0,0 +1,331 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2013 Intel, Inc. | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify it | ||
| 6 | # under the terms of the GNU General Public License as published by the Free | ||
| 7 | # Software Foundation; version 2 of the License | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, but | ||
| 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 12 | # for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | |||
| 18 | """ This module implements a simple GPT partitions parser which can read the | ||
| 19 | GPT header and the GPT partition table. """ | ||
| 20 | |||
| 21 | import struct | ||
| 22 | import uuid | ||
| 23 | import binascii | ||
| 24 | from mic.utils.errors import MountError | ||
| 25 | |||
# Little-endian GPT header layout: signature, revision, header size,
# header CRC32, reserved, current LBA, backup LBA, first usable LBA,
# last usable LBA, disk GUID, partition-table LBA, number of entries,
# entry size, partition-table CRC32.
_GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII"
_GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT)
# Little-endian GPT partition entry: type GUID, partition GUID,
# first LBA, last LBA, attribute flags, UTF-16 partition name.
_GPT_ENTRY_FORMAT = "<16s16sQQQ72s"
_GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT)
# The only on-disk GPT revision this parser accepts (revision 1.0).
_SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00'
| 31 | |||
def _stringify_uuid(binary_uuid):
    """ Transform a binary (little-endian) UUID into its canonical
    upper-case string representation. """

    return str(uuid.UUID(bytes_le = binary_uuid)).upper()
| 39 | |||
def _calc_header_crc(raw_hdr):
    """ Calculate the CRC32 checksum of a GPT header.  The 'raw_hdr'
    parameter is a list or tuple of the unpacked ("raw") header fields.
    As the UEFI specification requires, the CRC field itself (index 3)
    is zeroed before the checksum is computed. """

    fields = list(raw_hdr)
    fields[3] = 0
    packed = struct.pack(_GPT_HEADER_FORMAT, *fields)

    return binascii.crc32(packed) & 0xFFFFFFFF
| 51 | |||
def _validate_header(raw_hdr):
    """ Validate a GPT header.  'raw_hdr' is a list or tuple of the
    unpacked ("raw") header fields.  Raises MountError when the
    signature, revision, size or CRC32 is wrong. """

    # Signature must be the fixed 8-byte magic
    if raw_hdr[0] != 'EFI PART':
        raise MountError("GPT partition table not found")

    # Only one on-disk revision is understood
    if raw_hdr[1] != _SUPPORTED_GPT_REVISION:
        raise MountError("Unsupported GPT revision '%s', supported revision "
                         "is '%s'"
                         % (binascii.hexlify(raw_hdr[1]),
                            binascii.hexlify(_SUPPORTED_GPT_REVISION)))

    # Header size must match the structure we unpack
    if raw_hdr[2] != _GPT_HEADER_SIZE:
        raise MountError("Bad GPT header size: %d bytes, expected %d"
                         % (raw_hdr[2], _GPT_HEADER_SIZE))

    # Finally, the checksum must agree
    crc = _calc_header_crc(raw_hdr)
    if raw_hdr[3] != crc:
        raise MountError("GPT header crc mismatch: %#x, should be %#x"
                         % (crc, raw_hdr[3]))
| 77 | |||
class GptParser:
    """ GPT partition table parser. Allows reading the GPT header and the
    partition table, as well as modifying the partition table records. """

    def __init__(self, disk_path, sector_size = 512):
        """ The class constructor which accepts the following parameters:
            * disk_path - full path to the disk image or device node
            * sector_size - size of a disk sector in bytes """

        self.sector_size = sector_size
        self.disk_path = disk_path

        try:
            self._disk_obj = open(disk_path, 'r+b')
        except IOError as err:
            raise MountError("Cannot open file '%s' for reading GPT " \
                             "partitions: %s" % (disk_path, err))

    def __del__(self):
        """ The class destructor. """

        # 'open()' in the constructor may have failed, in which case the
        # '_disk_obj' attribute was never created - guard against that.
        disk_obj = getattr(self, '_disk_obj', None)
        if disk_obj is not None:
            disk_obj.close()

    def _read_disk(self, offset, size):
        """ A helper function which reads 'size' bytes from offset 'offset' of
        the disk and checks all the error conditions. """

        self._disk_obj.seek(offset)
        try:
            data = self._disk_obj.read(size)
        except IOError as err:
            raise MountError("cannot read from '%s': %s" % \
                             (self.disk_path, err))

        if len(data) != size:
            raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
                             "read only %d bytes" % \
                             (size, offset, self.disk_path, len(data)))

        return data

    def _write_disk(self, offset, buf):
        """ A helper function which writes buffer 'buf' to offset 'offset' of
        the disk. This function takes care of unaligned writes and checks all
        the error conditions. """

        # Since we may be dealing with a block device, we only can write in
        # 'self.sector_size' chunks. Find the aligned starting and ending
        # disk offsets to read. Floor division ('//') keeps the arithmetic
        # integral - identical to '/' on Python 2 ints, but future-proof.
        start = (offset // self.sector_size) * self.sector_size
        end = ((start + len(buf)) // self.sector_size + 1) * self.sector_size

        # Read-modify-write: splice 'buf' into the aligned window.
        data = self._read_disk(start, end - start)
        off = offset - start
        data = data[:off] + buf + data[off + len(buf):]

        self._disk_obj.seek(start)
        try:
            self._disk_obj.write(data)
        except IOError as err:
            raise MountError("cannot write to '%s': %s" % (self.disk_path, err))

    def read_header(self, primary = True):
        """ Read and verify the GPT header and return a dictionary containing
        the following elements:

        'signature'   : header signature
        'revision'    : header revision
        'hdr_size'    : header size in bytes
        'hdr_crc'     : header CRC32
        'hdr_lba'     : LBA of this header
        'hdr_offs'    : byte disk offset of this header
        'backup_lba'  : backup header LBA
        'backup_offs' : byte disk offset of backup header
        'first_lba'   : first usable LBA for partitions
        'first_offs'  : first usable byte disk offset for partitions
        'last_lba'    : last usable LBA for partitions
        'last_offs'   : last usable byte disk offset for partitions
        'disk_uuid'   : UUID of the disk
        'ptable_lba'  : starting LBA of array of partition entries
        'ptable_offs' : disk byte offset of the start of the partition table
        'ptable_size' : partition table size in bytes
        'entries_cnt' : number of available partition table entries
        'entry_size'  : size of a single partition entry
        'ptable_crc'  : CRC32 of the partition table
        'primary'     : a boolean, if 'True', this is the primary GPT header,
                        if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', the primary GPT header is read,
        otherwise the backup GPT header is read instead. """

        # Read and validate the primary GPT header (it lives in LBA 1)
        raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
        raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
        _validate_header(raw_hdr)
        primary_str = "primary"

        if not primary:
            # Read and validate the backup GPT header; its LBA is stored in
            # the primary header's 'backup_lba' field (raw_hdr[6]).
            raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
            raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
            _validate_header(raw_hdr)
            primary_str = "backup"

        return { 'signature'   : raw_hdr[0],
                 'revision'    : raw_hdr[1],
                 'hdr_size'    : raw_hdr[2],
                 'hdr_crc'     : raw_hdr[3],
                 'hdr_lba'     : raw_hdr[5],
                 'hdr_offs'    : raw_hdr[5] * self.sector_size,
                 'backup_lba'  : raw_hdr[6],
                 'backup_offs' : raw_hdr[6] * self.sector_size,
                 'first_lba'   : raw_hdr[7],
                 'first_offs'  : raw_hdr[7] * self.sector_size,
                 'last_lba'    : raw_hdr[8],
                 'last_offs'   : raw_hdr[8] * self.sector_size,
                 'disk_uuid'   :_stringify_uuid(raw_hdr[9]),
                 'ptable_lba'  : raw_hdr[10],
                 'ptable_offs' : raw_hdr[10] * self.sector_size,
                 'ptable_size' : raw_hdr[11] * raw_hdr[12],
                 'entries_cnt' : raw_hdr[11],
                 'entry_size'  : raw_hdr[12],
                 'ptable_crc'  : raw_hdr[13],
                 'primary'     : primary,
                 'primary_str' : primary_str }

    def _read_raw_ptable(self, header):
        """ Read and validate primary or backup partition table. The 'header'
        argument is the GPT header. If it is the primary GPT header, then the
        primary partition table is read and validated, otherwise - the backup
        one. The 'header' argument is a dictionary which is returned by the
        'read_header()' method. """

        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])

        crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
        if crc != header['ptable_crc']:
            raise MountError("Partition table at LBA %d (%s) is corrupted" % \
                             (header['ptable_lba'], header['primary_str']))

        return raw_ptable

    def get_partitions(self, primary = True):
        """ This is a generator which parses the GPT partition table and
        generates the following dictionary for each partition:

        'index'       : the index of the partition table entry
        'offs'        : byte disk offset of the partition table entry
        'type_uuid'   : partition type UUID
        'part_uuid'   : partition UUID
        'first_lba'   : the first LBA
        'last_lba'    : the last LBA
        'flags'       : attribute flags
        'name'        : partition name
        'primary'     : a boolean, if 'True', this is the primary partition
                        table, if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', partitions from the primary GPT
        partition table are generated, otherwise partitions from the backup GPT
        partition table are generated. """

        if primary:
            primary_str = "primary"
        else:
            primary_str = "backup"

        header = self.read_header(primary)
        raw_ptable = self._read_raw_ptable(header)

        for index in xrange(0, header['entries_cnt']):
            start = header['entry_size'] * index
            end = start + header['entry_size']
            raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])

            # Unused entries have zeroed LBA fields - skip them
            if raw_entry[2] == 0 or raw_entry[3] == 0:
                continue

            # The name is a fixed-size UTF-16 field, NUL-padded
            part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])

            yield { 'index'       : index,
                    'offs'        : header['ptable_offs'] + start,
                    'type_uuid'   : _stringify_uuid(raw_entry[0]),
                    'part_uuid'   : _stringify_uuid(raw_entry[1]),
                    'first_lba'   : raw_entry[2],
                    'last_lba'    : raw_entry[3],
                    'flags'       : raw_entry[4],
                    'name'        : part_name,
                    'primary'     : primary,
                    'primary_str' : primary_str }

    def _change_partition(self, header, entry):
        """ A helper function for 'change_partitions()' which changes a
        particular instance of the partition table (primary or backup). """

        if entry['index'] >= header['entries_cnt']:
            # Bug fix: the format string has three '%d' conversions, but
            # only two values were supplied, which made this raise
            # TypeError instead of the intended MountError.
            raise MountError("Partition table at LBA %d has only %d " \
                             "records cannot change record number %d" % \
                             (header['ptable_lba'], header['entries_cnt'],
                              entry['index']))
        # Read raw GPT header
        raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
        raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
        _validate_header(raw_hdr)

        # Prepare the new partition table entry
        raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
                                uuid.UUID(entry['type_uuid']).bytes_le,
                                uuid.UUID(entry['part_uuid']).bytes_le,
                                entry['first_lba'],
                                entry['last_lba'],
                                entry['flags'],
                                entry['name'].encode('UTF-16'))

        # Write the updated entry to the disk
        entry_offs = header['ptable_offs'] + \
                     header['entry_size'] * entry['index']
        self._write_disk(entry_offs, raw_entry)

        # Calculate and update partition table CRC32
        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])
        raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF

        # Calculate and update the GPT header CRC
        raw_hdr[3] = _calc_header_crc(raw_hdr)

        # Write the updated header to the disk
        raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
        self._write_disk(header['hdr_offs'], raw_hdr)

    def change_partition(self, entry):
        """ Change a GPT partition. The 'entry' argument has the same format as
        'get_partitions()' returns. This function simply changes the partition
        table record corresponding to 'entry' in both, the primary and the
        backup GPT partition tables. The partition table CRC is re-calculated
        and the GPT headers are modified accordingly. """

        # Change the primary partition table
        header = self.read_header(True)
        self._change_partition(header, entry)

        # Change the backup partition table
        header = self.read_header(False)
        self._change_partition(header, entry)
diff --git a/scripts/lib/mic/utils/grabber.py b/scripts/lib/mic/utils/grabber.py new file mode 100644 index 0000000000..45e30b4fb0 --- /dev/null +++ b/scripts/lib/mic/utils/grabber.py | |||
| @@ -0,0 +1,97 @@ | |||
| 1 | #!/usr/bin/python | ||
| 2 | |||
| 3 | import os | ||
| 4 | import sys | ||
| 5 | import rpm | ||
| 6 | import fcntl | ||
| 7 | import struct | ||
| 8 | import termios | ||
| 9 | |||
| 10 | from mic import msger | ||
| 11 | from mic.utils import runner | ||
| 12 | from mic.utils.errors import CreatorError | ||
| 13 | |||
| 14 | from urlgrabber import grabber | ||
| 15 | from urlgrabber import __version__ as grabber_version | ||
| 16 | |||
# Warn (do not fail) when python-urlgrabber predates 3.9.0, since older
# releases may hit the network issues mentioned in the message below.
if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
    msger.warning("Version of python-urlgrabber is %s, lower than '3.9.0', "
                  "you may encounter some network issues" % grabber_version)
| 20 | |||
def myurlgrab(url, filename, proxies, progress_obj = None):
    """Fetch 'url' into 'filename' and return the resulting local path.

    file:// URLs are served locally: .rpm files are returned in place,
    anything else is copied to 'filename' (leaving the source repository
    metadata untouched).  Other URLs are downloaded with urlgrabber,
    reporting progress through 'progress_obj' (the shared TextProgress
    instance by default).  Raises CreatorError on failure.
    """
    g = grabber.URLGrabber()
    if progress_obj is None:
        progress_obj = TextProgress()

    if url.startswith("file:/"):
        filepath = "/%s" % url.replace("file:", "").lstrip('/')
        if not os.path.exists(filepath):
            raise CreatorError("URLGrabber error: can't find file %s" % url)
        if url.endswith('.rpm'):
            return filepath
        # Copy rather than return in place so the repo metadata in the
        # source location stays untouched.
        runner.show(['cp', '-f', filepath, filename])
    else:
        try:
            filename = g.urlgrab(url=str(url),
                                 filename=filename,
                                 ssl_verify_host=False,
                                 ssl_verify_peer=False,
                                 proxies=proxies,
                                 http_headers=(('Pragma', 'no-cache'),),
                                 quote=0,
                                 progress_obj=progress_obj)
        except grabber.URLGrabError as err:
            msg = str(err)
            if url not in msg:
                msg += ' on %s' % url
            raise CreatorError(msg)

    return filename
| 53 | |||
def terminal_width(fd=1):
    """ Return the column count of the terminal attached to 'fd',
    falling back to 80 when the TIOCGWINSZ ioctl fails (no tty). """
    try:
        winsize = fcntl.ioctl(fd, termios.TIOCGWINSZ, 'abcdefgh')
        # rows, cols, xpixels, ypixels - we want the columns
        return struct.unpack('hhhh', winsize)[1]
    except:  # IOError
        return 80
| 62 | |||
def truncate_url(url, width):
    """ Return the basename of 'url', truncated to at most 'width' chars. """
    return os.path.basename(url)[:width]
| 65 | |||
class TextProgress(object):
    """ A urlgrabber-compatible progress reporter which prints retrieval
    status on the terminal.  The class is a singleton: every construction
    returns the same shared instance. """

    # The shared singleton instance
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # Bug fix: do not forward the constructor arguments to
            # object.__new__() - it takes no extra arguments (TypeError on
            # Python 3, deprecated on Python 2.6+); __init__ receives them.
            cls._instance = super(TextProgress, cls).__new__(cls)

        return cls._instance

    def __init__(self, totalnum = None):
        # totalnum: number of files expected, or None when unknown
        self.total = totalnum
        self.counter = 1

    def start(self, filename, url, *args, **kwargs):
        """ urlgrabber callback: a download of 'url' is starting. """
        self.url = url
        self.termwidth = terminal_width()
        msger.info("\r%-*s" % (self.termwidth, " "))
        if self.total is None:
            msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
        else:
            msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))

    def update(self, *args):
        """ urlgrabber callback: output is line-based, nothing to do. """
        pass

    def end(self, *args):
        """ urlgrabber callback: a download finished; advance the counter
        and emit the final newline after the last expected file. """
        if self.counter == self.total:
            msger.raw("\n")

        if self.total is not None:
            self.counter += 1
| 97 | |||
diff --git a/scripts/lib/mic/utils/misc.py b/scripts/lib/mic/utils/misc.py new file mode 100644 index 0000000000..63024346a9 --- /dev/null +++ b/scripts/lib/mic/utils/misc.py | |||
| @@ -0,0 +1,1067 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2010, 2011 Intel Inc. | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify it | ||
| 6 | # under the terms of the GNU General Public License as published by the Free | ||
| 7 | # Software Foundation; version 2 of the License | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, but | ||
| 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 12 | # for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | |||
| 18 | import os | ||
| 19 | import sys | ||
| 20 | import time | ||
| 21 | import tempfile | ||
| 22 | import re | ||
| 23 | import shutil | ||
| 24 | import glob | ||
| 25 | import hashlib | ||
| 26 | import subprocess | ||
| 27 | import platform | ||
| 28 | import traceback | ||
| 29 | |||
| 30 | |||
| 31 | try: | ||
| 32 | import sqlite3 as sqlite | ||
| 33 | except ImportError: | ||
| 34 | import sqlite | ||
| 35 | |||
| 36 | try: | ||
| 37 | from xml.etree import cElementTree | ||
| 38 | except ImportError: | ||
| 39 | import cElementTree | ||
| 40 | xmlparse = cElementTree.parse | ||
| 41 | |||
| 42 | from mic import msger | ||
| 43 | from mic.utils.errors import CreatorError, SquashfsError | ||
| 44 | from mic.utils.fs_related import find_binary_path, makedirs | ||
| 45 | from mic.utils.grabber import myurlgrab | ||
| 46 | from mic.utils.proxy import get_proxy_for | ||
| 47 | from mic.utils import runner | ||
| 48 | from mic.utils import rpmmisc | ||
| 49 | |||
| 50 | |||
# Matches/produces "name.arch version-release" strings for binary RPMs.
RPM_RE  = re.compile("(.*)\.(.*) (.*)-(.*)")
RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
# Matches source RPM file names: "name-version-release.src.rpm".
SRPM_RE = re.compile("(.*)-(\d+.*)-(\d+\.\d+).src.rpm")
| 54 | |||
| 55 | |||
def build_name(kscfg, release=None, prefix = None, suffix = None):
    """Construct and return an image name string.

    The name is the kickstart file's basename without its extension,
    optionally wrapped in a prefix and a suffix.

    kscfg   -- path to a kickstart file
    release -- when not None, forces the suffix to the empty string
    prefix  -- prepended as "<prefix>-"; also stripped from the basename
               first when the basename already starts with it
    suffix  -- appended as "-<suffix>"; defaults to a YYYYMMDDHHMM stamp
    """
    name = os.path.basename(kscfg)
    dot = name.rfind('.')
    if dot >= 0:
        name = name[:dot]

    # A release overrides any requested suffix.
    if release is not None:
        suffix = ""
    if prefix is None:
        prefix = ""
    if suffix is None:
        suffix = time.strftime("%Y%m%d%H%M")

    # Avoid doubling an already-present prefix.
    if name.startswith(prefix):
        name = name[len(prefix):]

    head = "%s-" % prefix if prefix else ""
    tail = "-%s" % suffix if suffix else ""
    return head + name + tail
| 93 | |||
def get_distro():
    """Detect the host Linux distribution, supporting "meego".

    Returns a (dist, version, id) tuple of strings.  On Python versions
    where the platform distribution helpers were removed (both
    platform.linux_distribution() and platform.dist() are gone since
    Python 3.8), empty strings are returned instead of raising.
    """
    support_dists = ('SuSE',
                     'debian',
                     'fedora',
                     'redhat',
                     'centos',
                     'meego',
                     'moblin',
                     'tizen')

    dist = ver = distid = ''
    try:
        (dist, ver, distid) = platform.linux_distribution( \
                                  supported_dists = support_dists)
    except:
        try:
            (dist, ver, distid) = platform.dist( \
                                      supported_dists = support_dists)
        except:
            # Neither helper is available - report an unknown distro
            # rather than crashing (bug fix: the old fallback itself
            # raised AttributeError on modern Python).
            pass

    return (dist, ver, distid)
| 114 | |||
def get_distro_str():
    """Return a single human-readable string describing the current
    Linux distribution, or 'Unknown Linux Distro'."""
    (dist, ver, distid) = get_distro()

    if not dist:
        return 'Unknown Linux Distro'
    return ' '.join(map(str.strip, (dist, ver, distid))).strip()
| 125 | |||
| 126 | _LOOP_RULE_PTH = None | ||
| 127 | |||
def hide_loopdev_presentation():
    """Install a udev rule hiding loop devices from UDisks presentation,
    remembering the rule path in _LOOP_RULE_PTH for later removal."""
    udev_rules = "80-prevent-loop-present.rules"
    udev_rules_dir = [
                      '/usr/lib/udev/rules.d/',
                      '/lib/udev/rules.d/',
                      '/etc/udev/rules.d/'
                     ]

    global _LOOP_RULE_PTH

    # Keep the LAST existing rules directory (matches historical behavior).
    existing = [rdir for rdir in udev_rules_dir if os.path.exists(rdir)]
    if existing:
        _LOOP_RULE_PTH = os.path.join(existing[-1], udev_rules)

    if not _LOOP_RULE_PTH:
        return

    # Best effort: failing to write the rule or poke udev is not fatal.
    try:
        with open(_LOOP_RULE_PTH, 'w') as wf:
            wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')

        runner.quiet('udevadm trigger')
    except:
        pass
| 152 | |||
def unhide_loopdev_presentation():
    """Remove the udev rule installed by hide_loopdev_presentation()
    and re-trigger udev.  Best effort: failures are ignored."""
    global _LOOP_RULE_PTH

    # Nothing to do when no rule was installed.
    if not _LOOP_RULE_PTH:
        return

    try:
        os.unlink(_LOOP_RULE_PTH)
        runner.quiet('udevadm trigger')
    except:
        pass
| 164 | |||
def extract_rpm(rpmfile, targetdir):
    """Extract the payload of 'rpmfile' into 'targetdir' by piping
    rpm2cpio into cpio.

    The caller's working directory is always restored, even when the
    extraction pipeline fails (bug fix: the old code left the process
    chdir'ed into 'targetdir' on exceptions).
    """
    rpm2cpio = find_binary_path("rpm2cpio")
    cpio = find_binary_path("cpio")

    olddir = os.getcwd()
    os.chdir(targetdir)
    try:
        msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
        p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
        p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (sout, serr) = p2.communicate()
        msger.verbose(sout or serr)
    finally:
        os.chdir(olddir)
def compressing(fpath, method):
    """Compress 'fpath' in place using 'method' ('gz' or 'bz2');
    raises CreatorError for unknown methods or tool failure."""
    comp_map = {
        "gz": "gzip",
        "bz2": "bzip2"
    }
    try:
        archiver = comp_map[method]
    except KeyError:
        raise CreatorError("Unsupport compress format: %s, valid values: %s"
                           % (method, ','.join(comp_map.keys())))
    cmd = find_binary_path(archiver)
    if runner.show([cmd, "-f", fpath]):
        raise CreatorError("Failed to %s file: %s" % (archiver, fpath))
| 193 | |||
def taring(dstfile, target):
    """Pack 'target' (a file, or the top-level contents of a directory)
    into the tarball 'dstfile'.

    The destination extension selects compression: .tar is plain,
    .tar.gz/.tgz use gzip, .tar.bz2/.tbz use bzip2.  Compression runs as
    a separate step, and intermediate files are renamed to 'dstfile'
    when needed.  Raises KeyError for an unknown extension.
    """
    import tarfile
    basen, ext = os.path.splitext(dstfile)
    comp = {".tar": None,
            ".gz": "gz",     # for .tar.gz
            ".bz2": "bz2",   # for .tar.bz2
            ".tgz": "gz",
            ".tbz": "bz2"}[ext]

    # specify tarball file path
    if not comp:
        tarpath = dstfile
    elif basen.endswith(".tar"):
        tarpath = basen
    else:
        tarpath = basen + ".tar"

    # Bug fix: close the archive even when adding a member raises,
    # so a partially written tarball is not left open.
    wf = tarfile.open(tarpath, 'w')
    try:
        if os.path.isdir(target):
            for item in os.listdir(target):
                wf.add(os.path.join(target, item), item)
        else:
            wf.add(target, os.path.basename(target))
    finally:
        wf.close()

    if comp:
        compressing(tarpath, comp)
        # when dstfile ext is ".tgz" and ".tbz", should rename
        if not basen.endswith(".tar"):
            shutil.move("%s.%s" % (tarpath, comp), dstfile)
| 224 | |||
def ziping(dstfile, target):
    """Pack 'target' into the zip archive 'dstfile'.

    For a directory, only its top-level regular files are added
    (subdirectories are skipped, as before); otherwise 'target' itself
    is added under its basename.
    """
    import zipfile
    wf = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
    # Bug fix: close the archive even when adding an entry raises.
    try:
        if os.path.isdir(target):
            for item in os.listdir(target):
                fpath = os.path.join(target, item)
                if not os.path.isfile(fpath):
                    continue
                wf.write(fpath, item, zipfile.ZIP_DEFLATED)
        else:
            wf.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
    finally:
        wf.close()
| 237 | |||
# Dispatch table: destination extension -> packer function.
pack_formats = {
    ".tar": taring,
    ".tar.gz": taring,
    ".tar.bz2": taring,
    ".tgz": taring,
    ".tbz": taring,
    ".zip": ziping,
}

def packing(dstfile, target):
    """Pack 'target' into 'dstfile', dispatching on the destination
    extension (see pack_formats).  Raises CreatorError for unknown
    extensions."""
    (base, ext) = os.path.splitext(dstfile)
    # Treat ".tar.gz" / ".tar.bz2" as a single compound extension.
    if ext in (".gz", ".bz2") and base.endswith(".tar"):
        ext = ".tar" + ext
    try:
        packer = pack_formats[ext]
    except KeyError:
        raise CreatorError("Unsupport pack format: %s, valid values: %s"
                           % (ext, ','.join(pack_formats.keys())))
    packer(dstfile, target)
| 257 | |||
def human_size(size):
    """Return a human-readable string (e.g. "1.5K") for a byte count.
    Non-positive sizes render as "0M"."""
    if size <= 0:
        return "0M"
    import math
    units = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    order = int(math.log(size, 1024))
    value = float(size / math.pow(1024, order))
    return "{0:.1f}{1:s}".format(value, units[order])
| 269 | |||
def get_block_size(file_obj):
    """ Returns block size for file object 'file_obj'. Errors are indicated by
    the 'IOError' exception. """

    from fcntl import ioctl
    import struct

    # Get the block size of the host file-system for the image file by calling
    # the FIGETBSZ ioctl (number 2).
    # NOTE(review): FIGETBSZ is a Linux ioctl; this presumably requires a
    # Linux host and a real file descriptor behind 'file_obj' -- confirm.
    binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
    return struct.unpack('I', binary_data)[0]
| 281 | |||
def check_space_pre_cp(src, dst):
    """Check whether disk space is enough before 'cp' like
    operations, else exception will be raised.
    """

    # get_file_size() reports MB, get_filesystem_avail() reports bytes
    needed = get_file_size(src) * 1024 * 1024
    available = get_filesystem_avail(dst)
    if needed > available:
        raise CreatorError("space on %s(%s) is not enough for about %s files"
                           % (dst, human_size(available), human_size(needed)))
| 292 | |||
def calc_hashes(file_path, hash_names, start = 0, end = None):
    """ Calculate hashes for a file. The 'file_path' argument is the file
    to calculate hash functions for, 'start' and 'end' are the starting and
    ending file offsets to calculate the hash functions for. The 'hash_names'
    argument is a list of hash names to calculate. Returns the list
    of calculated hash values in the hexadecimal form in the same order
    as 'hash_names'.
    """
    if end is None:
        end = os.path.getsize(file_path)

    chunk_size = 65536
    to_read = end - start
    read = 0

    hashes = [hashlib.new(hash_name) for hash_name in hash_names]

    with open(file_path, "rb") as f:
        f.seek(start)

        while read < to_read:
            if read + chunk_size > to_read:
                chunk_size = to_read - read
            chunk = f.read(chunk_size)
            if not chunk:
                # file is shorter than 'end'; avoid looping forever
                break
            for hash_obj in hashes:
                hash_obj.update(chunk)
            # count what was actually read, not what was requested
            read += len(chunk)

    return [hash_obj.hexdigest() for hash_obj in hashes]
| 328 | |||
def get_md5sum(fpath):
    """Return the hex MD5 digest of file 'fpath'."""
    return calc_hashes(fpath, ('md5', ))[0]
| 331 | |||
| 332 | |||
def normalize_ksfile(ksconf, release, arch):
    '''
    Return the name of a normalized ks file in which macro variables
    @BUILD_ID@ and @ARCH@ are replace with real values.

    The original ks file is returned if no special macro is used, otherwise
    a temp file is created and returned, which will be deleted when program
    exits normally.
    '''

    # fall back to defaults; any ix86 variant is collapsed to "ia32"
    if not release:
        release = "latest"
    if not arch or re.match(r'i.86', arch):
        arch = "ia32"

    with open(ksconf) as f:
        ksc = f.read()

    # no macros used -> hand back the original path untouched
    if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
        return ksconf

    msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
    ksc = ksc.replace("@ARCH@", arch)
    ksc = ksc.replace("@BUILD_ID@", release)

    # write the substituted content to a temp file named after the original
    fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
    os.write(fd, ksc)
    os.close(fd)

    msger.debug('normalized ks file:%s' % ksconf)

    # best-effort cleanup of the temp file on normal interpreter exit
    def remove_temp_ks():
        try:
            os.unlink(ksconf)
        except OSError, err:
            msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))

    import atexit
    atexit.register(remove_temp_ks)

    return ksconf
| 374 | |||
| 375 | |||
def _check_mic_chroot(rootdir):
    """Warn (but do not fail) if 'rootdir' does not look like a
    MeeGo/Tizen chroot: no known release file or no installed kernel."""
    base = rootdir.rstrip('/')
    release_files = [base + name for name in ("/etc/moblin-release",
                                              "/etc/meego-release",
                                              "/etc/tizen-release")]

    if not any(os.path.exists(path) for path in release_files):
        msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)

    if not glob.glob(rootdir + "/boot/vmlinuz-*"):
        msger.warning("Failed to find kernel module under %s" % rootdir)

    return
| 391 | |||
def selinux_check(arch, fstypes):
    """Refuse to build arm images or btrfs filesystems while SELinux is
    in enforcing mode; silently return if getenforce is unavailable."""
    try:
        getenforce = find_binary_path('getenforce')
    except CreatorError:
        # no getenforce binary -> no SELinux tooling, nothing to check
        return

    enforcing = runner.outs([getenforce]) == "Enforcing"

    if enforcing and arch and arch.startswith("arm"):
        raise CreatorError("Can't create arm image if selinux is enabled, "
                           "please run 'setenforce 0' to disable selinux")

    if enforcing and 'btrfs' in fstypes:
        raise CreatorError("Can't create btrfs image if selinux is enabled,"
                           " please run 'setenforce 0' to disable selinux")
| 407 | |||
def get_image_type(path):
    """Detect the mic image type of 'path' and return its short name
    (e.g. "fs", "raw", "livecd", "liveusb", ...).

    Detection order: directory -> file-name extension -> VDI magic
    header -> output of file(1).  Raises CreatorError if nothing
    matches.
    """
    def _get_extension_name(path):
        # final extension without the dot, or None if there is none
        match = re.search("(?<=\.)\w+$", path)
        if match:
            return match.group(0)
        else:
            return None

    # a directory is assumed to be an unpacked rootfs
    if os.path.isdir(path):
        _check_mic_chroot(path)
        return "fs"

    # file extension -> image type
    maptab = {
              "tar": "loop",
              "raw":"raw",
              "vmdk":"vmdk",
              "vdi":"vdi",
              "iso":"livecd",
              "usbimg":"liveusb",
             }

    extension = _get_extension_name(path)
    if extension in maptab:
        return maptab[extension]

    # no known extension: sniff the first KiB for the VirtualBox magic
    fd = open(path, "rb")
    file_header = fd.read(1024)
    fd.close()
    vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
    if file_header[0:len(vdi_flag)] == vdi_flag:
        return maptab["vdi"]

    # fall back to file(1); pattern order matters -- the "usbimg"
    # pattern (bootable x86 sector) must be tried before the generic
    # "raw" x86-boot-sector pattern, which it also matches
    output = runner.outs(['file', path])
    isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
    usbimgptn = re.compile(r".*x86 boot sector.*active.*")
    rawptn = re.compile(r".*x86 boot sector.*")
    vmdkptn = re.compile(r".*VMware. disk image.*")
    ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
    ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
    btrfsimgptn = re.compile(r".*BTRFS.*")
    if isoptn.match(output):
        return maptab["iso"]
    elif usbimgptn.match(output):
        return maptab["usbimg"]
    elif rawptn.match(output):
        return maptab["raw"]
    elif vmdkptn.match(output):
        return maptab["vmdk"]
    elif ext3fsimgptn.match(output):
        return "ext3fsimg"
    elif ext4fsimgptn.match(output):
        return "ext4fsimg"
    elif btrfsimgptn.match(output):
        return "btrfsimg"
    else:
        raise CreatorError("Cannot detect the type of image: %s" % path)
| 464 | |||
| 465 | |||
def get_file_size(filename):
    """ Return size in MB unit """

    def _du_megabytes(extra_opts):
        # run 'du -s' (optionally with extra options) in 1M blocks
        cmd = ['du', "-s"] + extra_opts + ["-B", "1M", filename]
        rc, output = runner.runtool(cmd)
        if rc != 0:
            raise CreatorError("Failed to run: %s" % ' '.join(cmd))
        return int(output.split()[0])

    # apparent size (-b) and on-disk usage can differ (sparse files,
    # filesystem overhead); report whichever is larger
    return max(_du_megabytes(["-b"]), _du_megabytes([]))
| 481 | |||
| 482 | |||
def get_filesystem_avail(fs):
    """Return the free space (in bytes) available to unprivileged users
    on the filesystem containing path 'fs'."""
    stat = os.statvfs(fs)
    return stat.f_bsize * stat.f_bavail
| 486 | |||
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
    """Convert disk image 'srcimg' of format 'srcfmt' ("vmdk" or "vdi")
    to 'dstimg' of format 'dstfmt' (only "raw" is supported).

    Raises CreatorError on an unsupported format or conversion failure.
    """
    #convert disk format
    if dstfmt != "raw":
        raise CreatorError("Invalid destination image format: %s" % dstfmt)
    msger.debug("converting %s image to %s" % (srcimg, dstimg))
    if srcfmt == "vmdk":
        path = find_binary_path("qemu-img")
        argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
    elif srcfmt == "vdi":
        path = find_binary_path("VBoxManage")
        argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
    else:
        # fixed typo in error message: "soure" -> "source"
        raise CreatorError("Invalid source image format: %s" % srcfmt)

    rc = runner.show(argv)
    # a single success/failure branch replaces the redundant
    # "if rc == 0 ... if rc != 0" pair
    if rc != 0:
        raise CreatorError("Unable to convert disk to %s" % dstfmt)
    msger.debug("convert successful")
| 506 | |||
def uncompress_squashfs(squashfsimg, outdir):
    """Extract the filesystem from squashfs image 'squashfsimg' into
    directory 'outdir' using unsquashfs."""
    unsquashfs = find_binary_path("unsquashfs")
    if runner.show([unsquashfs, "-d", outdir, squashfsimg]) != 0:
        raise SquashfsError("Failed to uncompress %s." % squashfsimg)
| 514 | |||
def mkdtemp(dir = "/var/tmp", prefix = "mic-tmp-"):
    """Create and return a unique temporary directory under 'dir'.

    FIXME: use the dir in mic.conf instead
    """
    # ensure the parent exists before tempfile creates a subdir in it
    makedirs(dir)
    return tempfile.mkdtemp(dir=dir, prefix=prefix)
| 520 | |||
def get_repostrs_from_ks(ks):
    """Extract repository definitions from kickstart handler 'ks'.

    Returns a list of dicts, one per kickstart 'repo' line, containing
    only the attributes that are actually set.  A repo without a name
    gets a temporary one derived from the MD5 of its baseurl.
    """
    def _get_temp_reponame(baseurl):
        md5obj = hashlib.md5(baseurl)
        tmpreponame = "%s" % md5obj.hexdigest()
        return tmpreponame

    kickstart_repos = []

    for repodata in ks.handler.repo.repoList:
        repo = {}
        # NOTE: duplicate 'proxypasswd' entry removed from this tuple
        for attr in ('name',
                     'baseurl',
                     'mirrorlist',
                     'includepkgs', # val is list
                     'excludepkgs', # val is list
                     'cost',    # int
                     'priority',# int
                     'save',
                     'proxy',
                     'proxyuser',
                     'proxypasswd',
                     'debuginfo',
                     'source',
                     'gpgkey',
                     'ssl_verify'):
            if hasattr(repodata, attr) and getattr(repodata, attr):
                repo[attr] = getattr(repodata, attr)

        if 'name' not in repo:
            repo['name'] = _get_temp_reponame(repodata.baseurl)

        kickstart_repos.append(repo)

    return kickstart_repos
| 556 | |||
def _get_uncompressed_data_from_url(url, filename, proxies):
    """Download 'url' to 'filename' and, if the downloaded file ends in
    .gz or .bz2, uncompress it in place.  Returns the local path of the
    (possibly uncompressed) file."""
    filename = myurlgrab(url, filename, proxies)
    suffix = None
    if filename.endswith(".gz"):
        suffix = ".gz"
        runner.quiet(['gunzip', "-f", filename])
    elif filename.endswith(".bz2"):
        suffix = ".bz2"
        runner.quiet(['bunzip2', "-f", filename])
    if suffix:
        # strip the suffix from the end only; str.replace() would also
        # clobber a matching substring earlier in the path
        # (e.g. "repo.gz.cache/data.gz")
        filename = filename[:-len(suffix)]
    return filename
| 569 | |||
def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
                            sumtype=None, checksum=None):
    """Fetch repo metadata file 'filename' from 'baseurl' into the
    per-repo cache directory.

    If 'sumtype'/'checksum' are given and a cached copy whose checksum
    matches already exists, its path is returned without re-downloading.
    Compressed files are uncompressed and the uncompressed path is
    returned.
    """
    url = os.path.join(baseurl, filename)
    filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
    # the cached (verified) copy is the uncompressed name
    if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
        filename = os.path.splitext(filename_tmp)[0]
    else:
        filename = filename_tmp
    if sumtype and checksum and os.path.exists(filename):
        try:
            sumcmd = find_binary_path("%ssum" % sumtype)
        except Exception:
            # best-effort cache check: a missing checksum tool just
            # forces a re-download (narrowed from a bare 'except:',
            # which also swallowed KeyboardInterrupt/SystemExit)
            file_checksum = None
        else:
            file_checksum = runner.outs([sumcmd, filename]).split()[0]

        if file_checksum and file_checksum == checksum:
            return filename

    return _get_uncompressed_data_from_url(url, filename_tmp, proxies)
| 590 | |||
def get_metadata_from_repos(repos, cachedir):
    """Download and cache repomd metadata for every repo dict in 'repos'.

    For each repo, fetches repodata/repomd.xml, then the primary,
    patterns and comps metadata files it references (verified against
    the checksums in repomd.xml), plus the optional repomd.xml.key.
    Repos without primary metadata are skipped.  Returns a list of
    dicts with keys: name, baseurl, repomd, primary, cachedir, proxies,
    patterns, comps, repokey.
    """
    my_repo_metadata = []
    for repo in repos:
        reponame = repo['name']
        baseurl = repo['baseurl']


        if 'proxy' in repo:
            proxy = repo['proxy']
        else:
            proxy = get_proxy_for(baseurl)

        proxies = None
        if proxy:
            # key the proxy by the URL scheme (part before the first ':')
            proxies = {str(baseurl.split(":")[0]):str(proxy)}

        makedirs(os.path.join(cachedir, reponame))
        url = os.path.join(baseurl, "repodata/repomd.xml")
        filename = os.path.join(cachedir, reponame, 'repomd.xml')
        repomd = myurlgrab(url, filename, proxies)
        try:
            root = xmlparse(repomd)
        except SyntaxError:
            raise CreatorError("repomd.xml syntax error.")

        # namespace prefix "{...}" of the root tag, reused for child lookups
        ns = root.getroot().tag
        ns = ns[0:ns.rindex("}")+1]

        filepaths = {}
        checksums = {}
        sumtypes = {}

        # locate the <data type="patterns"> entry, if any
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] == "patterns":
                filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
                checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # locate the comps (package group) metadata, compressed or not
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("group_gz", "group"):
                filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
                checksums['comps'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # locate the primary package list (sqlite db or xml)
        primary_type = None
        for elm in root.getiterator("%sdata" % ns):
            if elm.attrib["type"] in ("primary_db", "primary"):
                primary_type = elm.attrib["type"]
                filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
                checksums['primary'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
                break

        # a repo without primary metadata is unusable -- skip it
        if not primary_type:
            continue

        # download each referenced metadata file into the cache
        for item in ("primary", "patterns", "comps"):
            if item not in filepaths:
                filepaths[item] = None
                continue
            if not filepaths[item]:
                continue
            filepaths[item] = _get_metadata_from_repo(baseurl,
                                                      proxies,
                                                      cachedir,
                                                      reponame,
                                                      filepaths[item],
                                                      sumtypes[item],
                                                      checksums[item])

        """ Get repo key """
        # the signing key is optional; failure to fetch it is not fatal
        try:
            repokey = _get_metadata_from_repo(baseurl,
                                              proxies,
                                              cachedir,
                                              reponame,
                                              "repodata/repomd.xml.key")
        except CreatorError:
            repokey = None
            msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))

        my_repo_metadata.append({"name":reponame,
                                 "baseurl":baseurl,
                                 "repomd":repomd,
                                 "primary":filepaths['primary'],
                                 "cachedir":cachedir,
                                 "proxies":proxies,
                                 "patterns":filepaths['patterns'],
                                 "comps":filepaths['comps'],
                                 "repokey":repokey})

    return my_repo_metadata
| 685 | |||
def get_rpmver_in_repo(repometadata):
    """Return the highest version string of the 'rpm' package found in
    the given repo metadata (list from get_metadata_from_repos), or
    None if no repo contains an rpm package."""
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]

            # collect every 'ver' attribute of <version> under packages
            # named exactly 'rpm'
            versionlist = []
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == 'rpm':
                    for node in elm.getchildren():
                        if node.tag == "%sversion" % ns:
                            versionlist.append(node.attrib['ver'])

            # sort numerically by dotted components and take the largest
            # (Python 2 idiom: reversed(...).next())
            if versionlist:
                return reversed(
                         sorted(
                           versionlist,
                           key = lambda ver: map(int, ver.split('.')))).next()

        elif repo["primary"].endswith(".sqlite"):
            # sqlite primary db: let SQL do the ordering; note this is a
            # lexicographic ORDER BY on the version column
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select version from packages where "
                                   "name=\"rpm\" ORDER by version DESC"):
                con.close()
                return row[0]

    return None
| 714 | |||
def get_arch(repometadata):
    """Collect package architectures from repo metadata.

    Returns a tuple (uniq_arch, archlist): 'archlist' is every concrete
    arch found (excluding "noarch" and "src"); 'uniq_arch' keeps only
    the most specific arches according to rpmmisc.archPolicies (an arch
    already covered by another policy's compatibility list is dropped).
    """
    archlist = []
    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sarch" % ns).text not in ("noarch", "src"):
                    arch = elm.find("%sarch" % ns).text
                    if arch not in archlist:
                        archlist.append(arch)
        elif repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
                if row[0] not in archlist:
                    archlist.append(row[0])

            con.close()

    # reduce to the most specific arches: an arch that appears in
    # another kept arch's policy list is redundant and removed; if a
    # kept arch is covered by the current one, the current one replaces
    # it.  NOTE the in-place mutation of uniq_arch while indexing with
    # 'j' -- 'continue' after remove() deliberately re-checks index j.
    uniq_arch = []
    for i in range(len(archlist)):
        if archlist[i] not in rpmmisc.archPolicies.keys():
            continue
        need_append = True
        j = 0
        while j < len(uniq_arch):
            if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
                need_append = False
                break
            if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
                if need_append:
                    uniq_arch[j] = archlist[i]
                    need_append = False
                else:
                    uniq_arch.remove(uniq_arch[j])
                continue
            j += 1
        if need_append:
            uniq_arch.append(archlist[i])

    return uniq_arch, archlist
| 757 | |||
def get_package(pkg, repometadata, arch = None):
    """Find the newest package named 'pkg' in the given repos, download
    it into the repo cache and return the local file path.

    'arch' restricts the search: None means no arch filter for sqlite
    repos (and matches nothing for xml repos, since 'arches' stays
    empty); an arch listed in rpmmisc.archPolicies expands to its whole
    compatibility list plus "noarch".  Returns None if not found.
    """
    ver = ""
    target_repo = None
    if not arch:
        arches = []
    elif arch not in rpmmisc.archPolicies:
        arches = [arch]
    else:
        arches = rpmmisc.archPolicies[arch].split(':')
        arches.append('noarch')

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg:
                    if elm.find("%sarch" % ns).text in arches:
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        # keep the highest "ver-rel" (string comparison)
                        if tmpver > ver:
                            ver = tmpver
                            location = elm.find("%slocation" % ns)
                            pkgpath = "%s" % location.attrib['href']
                            target_repo = repo
                        break
        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            if arch:
                sql = 'select version, release, location_href from packages ' \
                      'where name = "%s" and arch IN ("%s")' % \
                      (pkg, '","'.join(arches))
                for row in con.execute(sql):
                    tmpver = "%s-%s" % (row[0], row[1])
                    if tmpver > ver:
                        ver = tmpver
                        pkgpath = "%s" % row[2]
                        target_repo = repo
                    # only the first row is considered
                    break
            else:
                sql = 'select version, release, location_href from packages ' \
                      'where name = "%s"' % pkg
                for row in con.execute(sql):
                    tmpver = "%s-%s" % (row[0], row[1])
                    if tmpver > ver:
                        ver = tmpver
                        pkgpath = "%s" % row[2]
                        target_repo = repo
                    break
            con.close()
    if target_repo:
        makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
        url = os.path.join(target_repo["baseurl"], pkgpath)
        filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
        if os.path.exists(filename):
            # reuse the cached rpm only if its integrity check passes
            ret = rpmmisc.checkRpmIntegrity('rpm', filename)
            if ret == 0:
                return filename

            msger.warning("package %s is damaged: %s" %
                          (os.path.basename(filename), filename))
            os.unlink(filename)

        pkg = myurlgrab(str(url), filename, target_repo["proxies"])
        return pkg
    else:
        return None
| 826 | |||
def get_source_name(pkg, repometadata):
    """Return the source-package name for binary rpm filename 'pkg' by
    looking up its sourcerpm field in the repo metadata, or None if the
    name cannot be parsed or no repo knows the package.

    RPM_RE / SRPM_RE are module-level regexes (defined elsewhere in
    this file) that extract the bare name from rpm/srpm filenames.
    """

    def get_bin_name(pkg):
        # binary rpm filename -> package name, or None on parse failure
        m = RPM_RE.match(pkg)
        if m:
            return m.group(1)
        return None

    def get_src_name(srpm):
        # source rpm filename -> source package name, or None
        m = SRPM_RE.match(srpm)
        if m:
            return m.group(1)
        return None

    ver = ""
    target_repo = None

    pkg_name = get_bin_name(pkg)
    if not pkg_name:
        return None

    for repo in repometadata:
        if repo["primary"].endswith(".xml"):
            root = xmlparse(repo["primary"])
            ns = root.getroot().tag
            ns = ns[0:ns.rindex("}")+1]
            for elm in root.getiterator("%spackage" % ns):
                if elm.find("%sname" % ns).text == pkg_name:
                    if elm.find("%sarch" % ns).text != "src":
                        version = elm.find("%sversion" % ns)
                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
                        if tmpver > ver:
                            ver = tmpver
                            fmt = elm.find("%sformat" % ns)
                            # NOTE(review): 'if fmt:' tests Element
                            # truthiness (False when it has no children),
                            # not 'is not None' -- presumably intended,
                            # since <format> normally has children; verify.
                            if fmt:
                                fns = fmt.getchildren()[0].tag
                                fns = fns[0:fns.rindex("}")+1]
                                pkgpath = fmt.find("%ssourcerpm" % fns).text
                                target_repo = repo
                        break

        if repo["primary"].endswith(".sqlite"):
            con = sqlite.connect(repo["primary"])
            for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
                tmpver = "%s-%s" % (row[0], row[1])
                if tmpver > ver:
                    pkgpath = "%s" % row[2]
                    target_repo = repo
                break
            con.close()
    if target_repo:
        return get_src_name(pkgpath)
    else:
        return None
| 881 | |||
def get_pkglist_in_patterns(group, patterns):
    """Return the list of package names required by pattern 'group'
    (matched by name or summary) in patterns XML file 'patterns'.
    Returns an empty list if the group or its requires are not found.
    """
    found = False
    pkglist = []
    try:
        root = xmlparse(patterns)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % patterns)

    # find the pattern element whose name or summary matches 'group';
    # the loop variable 'elm' is deliberately used after the loop
    for elm in list(root.getroot()):
        ns = elm.tag
        ns = ns[0:ns.rindex("}")+1]
        name = elm.find("%sname" % ns)
        summary = elm.find("%ssummary" % ns)
        if name.text == group or summary.text == group:
            found = True
            break

    if not found:
        return pkglist

    # find the 'requires' child of the matched pattern; again the
    # loop variable 'requires' leaks out of the loop on purpose
    found = False
    for requires in list(elm):
        if requires.tag.endswith("requires"):
            found = True
            break

    if not found:
        return pkglist

    # collect the unique package names under <requires>
    for pkg in list(requires):
        pkgname = pkg.attrib["name"]
        if pkgname not in pkglist:
            pkglist.append(pkgname)

    return pkglist
| 917 | |||
def get_pkglist_in_comps(group, comps):
    """Return the list of package names in comps group 'group' (matched
    by id or name) from comps XML file 'comps'.  Returns an empty list
    if the group is not found.
    """
    found = False
    pkglist = []
    try:
        root = xmlparse(comps)
    except SyntaxError:
        raise SyntaxError("%s syntax error." % comps)

    # find the <group> element matching by id or name; 'elm' is reused
    # after the loop to walk its <packagereq> children
    for elm in root.getiterator("group"):
        id = elm.find("id")
        name = elm.find("name")
        if id.text == group or name.text == group:
            packagelist = elm.find("packagelist")
            found = True
            break

    if not found:
        return pkglist

    # collect the unique package names from <packagereq> entries
    for require in elm.getiterator("packagereq"):
        if require.tag.endswith("packagereq"):
            pkgname = require.text
            if pkgname not in pkglist:
                pkglist.append(pkgname)

    return pkglist
| 944 | |||
def is_statically_linked(binary):
    """Return True if file(1) reports 'binary' as statically linked."""
    return ", statically linked, " in runner.outs(['file', binary])
| 947 | |||
def setup_qemu_emulator(rootdir, arch):
    """Prepare a statically linked qemu-arm for running arm binaries in
    the chroot 'rootdir' via binfmt_misc.

    Copies the emulator into the chroot, disables SELinux enforcement,
    and (re)registers the arm binfmt_misc handler.  Requires root
    privileges (writes under /proc/sys/fs/binfmt_misc).  Returns the
    emulator path as seen from inside the chroot.
    """
    # mount binfmt_misc if it doesn't exist
    if not os.path.exists("/proc/sys/fs/binfmt_misc"):
        modprobecmd = find_binary_path("modprobe")
        runner.show([modprobecmd, "binfmt_misc"])
    if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
        mountcmd = find_binary_path("mount")
        runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])

    # qemu_emulator is a special case, we can't use find_binary_path
    # qemu emulator should be a statically-linked executable file
    qemu_emulator = "/usr/bin/qemu-arm"
    if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
        qemu_emulator = "/usr/bin/qemu-arm-static"
    if not os.path.exists(qemu_emulator):
        raise CreatorError("Please install a statically-linked qemu-arm")

    # qemu emulator version check
    armv7_list = [arch for arch in rpmmisc.archPolicies.keys() if arch.startswith('armv7')]
    if arch in armv7_list:  # need qemu (>=0.13.0)
        qemuout = runner.outs([qemu_emulator, "-h"])
        m = re.search("version\s*([.\d]+)", qemuout)
        if m:
            qemu_version = m.group(1)
            # NOTE(review): string comparison of version numbers --
            # e.g. "0.9" > "0.13"; works for the versions in practice
            if qemu_version < "0.13":
                raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
        else:
            msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)

    # copy the emulator into the chroot so the kernel can exec it there
    if not os.path.exists(rootdir + "/usr/bin"):
        makedirs(rootdir + "/usr/bin")
    shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
    qemu_emulator = "/usr/bin/qemu-arm-static"

    # disable selinux, selinux will block qemu emulator to run
    if os.path.exists("/usr/sbin/setenforce"):
        msger.info('Try to disable selinux')
        runner.show(["/usr/sbin/setenforce", "0"])

    # unregister it if it has been registered and is a dynamically-linked executable
    node = "/proc/sys/fs/binfmt_misc/arm"
    if os.path.exists(node):
        # writing "-1" to the node removes the existing registration
        qemu_unregister_string = "-1\n"
        fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
        fd.write(qemu_unregister_string)
        fd.close()

    # register qemu emulator for interpreting other arch executable file
    if not os.path.exists(node):
        # magic/mask match the ELF header of 32-bit ARM executables
        qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
        fd = open("/proc/sys/fs/binfmt_misc/register", "w")
        fd.write(qemu_arm_string)
        fd.close()

    return qemu_emulator
| 1003 | |||
def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
    """Download the source rpms for the binary packages 'pkgs' into
    'instroot'/usr/src/SRPMS, reusing cached copies where possible.

    Source repos are recognized by a "-source" suffix on their name.
    Returns the list of copied srpm filenames/paths, or None if no
    source repo is configured.
    """
    def get_source_repometadata(repometadata):
        # keep only repos whose name marks them as source repos
        src_repometadata=[]
        for repo in repometadata:
            if repo["name"].endswith("-source"):
                src_repometadata.append(repo)
        if src_repometadata:
            return src_repometadata
        return None

    def get_src_name(srpm):
        # srpm filename -> source package name (SRPM_RE is module-level)
        m = SRPM_RE.match(srpm)
        if m:
            return m.group(1)
        return None

    src_repometadata = get_source_repometadata(repometadata)

    if not src_repometadata:
        msger.warning("No source repo found")
        return None

    # index the srpms already present in the cache by source name
    src_pkgs = []
    lpkgs_dict = {}
    lpkgs_path = []
    for repo in src_repometadata:
        cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
        lpkgs_path += glob.glob(cachepath)

    for lpkg in lpkgs_path:
        lpkg_name = get_src_name(os.path.basename(lpkg))
        lpkgs_dict[lpkg_name] = lpkg
    localpkgs = lpkgs_dict.keys()

    cached_count = 0
    destdir = instroot+'/usr/src/SRPMS'
    if not os.path.exists(destdir):
        os.makedirs(destdir)

    # map each binary package to its source package name (deduplicated)
    srcpkgset = set()
    for _pkg in pkgs:
        srcpkg_name = get_source_name(_pkg, repometadata)
        if not srcpkg_name:
            continue
        srcpkgset.add(srcpkg_name)

    # copy from cache when available, otherwise download from the repos
    for pkg in list(srcpkgset):
        if pkg in localpkgs:
            cached_count += 1
            shutil.copy(lpkgs_dict[pkg], destdir)
            src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
        else:
            src_pkg = get_package(pkg, src_repometadata, 'src')
            if src_pkg:
                shutil.copy(src_pkg, destdir)
                src_pkgs.append(src_pkg)
    msger.info("%d source packages gotten from cache" % cached_count)

    return src_pkgs
| 1063 | |||
def strip_end(text, suffix):
    """Return 'text' with 'suffix' removed from its end, if present;
    otherwise return 'text' unchanged.

    An empty suffix is a no-op (the previous code returned "" because
    text[:-0] is the empty slice).
    """
    if not suffix or not text.endswith(suffix):
        return text
    return text[:-len(suffix)]
diff --git a/scripts/lib/mic/utils/partitionedfs.py b/scripts/lib/mic/utils/partitionedfs.py new file mode 100644 index 0000000000..04758440e1 --- /dev/null +++ b/scripts/lib/mic/utils/partitionedfs.py | |||
| @@ -0,0 +1,790 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2009, 2010, 2011 Intel, Inc. | ||
| 4 | # Copyright (c) 2007, 2008 Red Hat, Inc. | ||
| 5 | # Copyright (c) 2008 Daniel P. Berrange | ||
| 6 | # Copyright (c) 2008 David P. Huff | ||
| 7 | # | ||
| 8 | # This program is free software; you can redistribute it and/or modify it | ||
| 9 | # under the terms of the GNU General Public License as published by the Free | ||
| 10 | # Software Foundation; version 2 of the License | ||
| 11 | # | ||
| 12 | # This program is distributed in the hope that it will be useful, but | ||
| 13 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 14 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 15 | # for more details. | ||
| 16 | # | ||
| 17 | # You should have received a copy of the GNU General Public License along | ||
| 18 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 19 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 20 | |||
| 21 | import os | ||
| 22 | |||
| 23 | from mic import msger | ||
| 24 | from mic.utils import runner | ||
| 25 | from mic.utils.errors import MountError | ||
| 26 | from mic.utils.fs_related import * | ||
| 27 | from mic.utils.gpt_parser import GptParser | ||
| 28 | |||
# Overhead of the MBR partitioning scheme (just one sector for the MBR itself)
MBR_OVERHEAD = 1
# Overhead of the GPT partitioning scheme: protective MBR (1 sector) +
# GPT header (1 sector) + partition entry array (32 sectors)
GPT_OVERHEAD = 34

# Size of a sector in bytes, used in all sector <-> byte conversions below
SECTOR_SIZE = 512
| 36 | |||
| 37 | class PartitionedMount(Mount): | ||
| 38 | def __init__(self, mountdir, skipformat = False): | ||
| 39 | Mount.__init__(self, mountdir) | ||
| 40 | self.disks = {} | ||
| 41 | self.partitions = [] | ||
| 42 | self.subvolumes = [] | ||
| 43 | self.mapped = False | ||
| 44 | self.mountOrder = [] | ||
| 45 | self.unmountOrder = [] | ||
| 46 | self.parted = find_binary_path("parted") | ||
| 47 | self.kpartx = find_binary_path("kpartx") | ||
| 48 | self.mkswap = find_binary_path("mkswap") | ||
| 49 | self.btrfscmd=None | ||
| 50 | self.mountcmd = find_binary_path("mount") | ||
| 51 | self.umountcmd = find_binary_path("umount") | ||
| 52 | self.skipformat = skipformat | ||
| 53 | self.snapshot_created = self.skipformat | ||
| 54 | # Size of a sector used in calculations | ||
| 55 | self.sector_size = SECTOR_SIZE | ||
| 56 | self._partitions_layed_out = False | ||
| 57 | |||
| 58 | def __add_disk(self, disk_name): | ||
| 59 | """ Add a disk 'disk_name' to the internal list of disks. Note, | ||
| 60 | 'disk_name' is the name of the disk in the target system | ||
| 61 | (e.g., sdb). """ | ||
| 62 | |||
| 63 | if disk_name in self.disks: | ||
| 64 | # We already have this disk | ||
| 65 | return | ||
| 66 | |||
| 67 | assert not self._partitions_layed_out | ||
| 68 | |||
| 69 | self.disks[disk_name] = \ | ||
| 70 | { 'disk': None, # Disk object | ||
| 71 | 'mapped': False, # True if kpartx mapping exists | ||
| 72 | 'numpart': 0, # Number of allocate partitions | ||
| 73 | 'partitions': [], # Indexes to self.partitions | ||
| 74 | 'offset': 0, # Offset of next partition (in sectors) | ||
| 75 | # Minimum required disk size to fit all partitions (in bytes) | ||
| 76 | 'min_size': 0, | ||
| 77 | 'ptable_format': "msdos" } # Partition table format | ||
| 78 | |||
| 79 | def add_disk(self, disk_name, disk_obj): | ||
| 80 | """ Add a disk object which have to be partitioned. More than one disk | ||
| 81 | can be added. In case of multiple disks, disk partitions have to be | ||
| 82 | added for each disk separately with 'add_partition()". """ | ||
| 83 | |||
| 84 | self.__add_disk(disk_name) | ||
| 85 | self.disks[disk_name]['disk'] = disk_obj | ||
| 86 | |||
| 87 | def __add_partition(self, part): | ||
| 88 | """ This is a helper function for 'add_partition()' which adds a | ||
| 89 | partition to the internal list of partitions. """ | ||
| 90 | |||
| 91 | assert not self._partitions_layed_out | ||
| 92 | |||
| 93 | self.partitions.append(part) | ||
| 94 | self.__add_disk(part['disk_name']) | ||
| 95 | |||
| 96 | def add_partition(self, size, disk_name, mountpoint, fstype = None, | ||
| 97 | label=None, fsopts = None, boot = False, align = None, | ||
| 98 | part_type = None): | ||
| 99 | """ Add the next partition. Prtitions have to be added in the | ||
| 100 | first-to-last order. """ | ||
| 101 | |||
| 102 | ks_pnum = len(self.partitions) | ||
| 103 | |||
| 104 | # Converting MB to sectors for parted | ||
| 105 | size = size * 1024 * 1024 / self.sector_size | ||
| 106 | |||
| 107 | # We need to handle subvolumes for btrfs | ||
| 108 | if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1: | ||
| 109 | self.btrfscmd=find_binary_path("btrfs") | ||
| 110 | subvol = None | ||
| 111 | opts = fsopts.split(",") | ||
| 112 | for opt in opts: | ||
| 113 | if opt.find("subvol=") != -1: | ||
| 114 | subvol = opt.replace("subvol=", "").strip() | ||
| 115 | break | ||
| 116 | if not subvol: | ||
| 117 | raise MountError("No subvolume: %s" % fsopts) | ||
| 118 | self.subvolumes.append({'size': size, # In sectors | ||
| 119 | 'mountpoint': mountpoint, # Mount relative to chroot | ||
| 120 | 'fstype': fstype, # Filesystem type | ||
| 121 | 'fsopts': fsopts, # Filesystem mount options | ||
| 122 | 'disk_name': disk_name, # physical disk name holding partition | ||
| 123 | 'device': None, # kpartx device node for partition | ||
| 124 | 'mount': None, # Mount object | ||
| 125 | 'subvol': subvol, # Subvolume name | ||
| 126 | 'boot': boot, # Bootable flag | ||
| 127 | 'mounted': False # Mount flag | ||
| 128 | }) | ||
| 129 | |||
| 130 | # We still need partition for "/" or non-subvolume | ||
| 131 | if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1: | ||
| 132 | # Don't need subvolume for "/" because it will be set as default subvolume | ||
| 133 | if fsopts and fsopts.find("subvol=") != -1: | ||
| 134 | opts = fsopts.split(",") | ||
| 135 | for opt in opts: | ||
| 136 | if opt.strip().startswith("subvol="): | ||
| 137 | opts.remove(opt) | ||
| 138 | break | ||
| 139 | fsopts = ",".join(opts) | ||
| 140 | |||
| 141 | part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file | ||
| 142 | 'size': size, # In sectors | ||
| 143 | 'mountpoint': mountpoint, # Mount relative to chroot | ||
| 144 | 'fstype': fstype, # Filesystem type | ||
| 145 | 'fsopts': fsopts, # Filesystem mount options | ||
| 146 | 'label': label, # Partition label | ||
| 147 | 'disk_name': disk_name, # physical disk name holding partition | ||
| 148 | 'device': None, # kpartx device node for partition | ||
| 149 | 'mount': None, # Mount object | ||
| 150 | 'num': None, # Partition number | ||
| 151 | 'boot': boot, # Bootable flag | ||
| 152 | 'align': align, # Partition alignment | ||
| 153 | 'part_type' : part_type, # Partition type | ||
| 154 | 'partuuid': None } # Partition UUID (GPT-only) | ||
| 155 | |||
| 156 | self.__add_partition(part) | ||
| 157 | |||
| 158 | def layout_partitions(self, ptable_format = "msdos"): | ||
| 159 | """ Layout the partitions, meaning calculate the position of every | ||
| 160 | partition on the disk. The 'ptable_format' parameter defines the | ||
| 161 | partition table format, and may be either "msdos" or "gpt". """ | ||
| 162 | |||
| 163 | msger.debug("Assigning %s partitions to disks" % ptable_format) | ||
| 164 | |||
| 165 | if ptable_format not in ('msdos', 'gpt'): | ||
| 166 | raise MountError("Unknown partition table format '%s', supported " \ | ||
| 167 | "formats are: 'msdos' and 'gpt'" % ptable_format) | ||
| 168 | |||
| 169 | if self._partitions_layed_out: | ||
| 170 | return | ||
| 171 | |||
| 172 | self._partitions_layed_out = True | ||
| 173 | |||
| 174 | # Go through partitions in the order they are added in .ks file | ||
| 175 | for n in range(len(self.partitions)): | ||
| 176 | p = self.partitions[n] | ||
| 177 | |||
| 178 | if not self.disks.has_key(p['disk_name']): | ||
| 179 | raise MountError("No disk %s for partition %s" \ | ||
| 180 | % (p['disk_name'], p['mountpoint'])) | ||
| 181 | |||
| 182 | if p['part_type'] and ptable_format != 'gpt': | ||
| 183 | # The --part-type can also be implemented for MBR partitions, | ||
| 184 | # in which case it would map to the 1-byte "partition type" | ||
| 185 | # filed at offset 3 of the partition entry. | ||
| 186 | raise MountError("setting custom partition type is only " \ | ||
| 187 | "imlemented for GPT partitions") | ||
| 188 | |||
| 189 | # Get the disk where the partition is located | ||
| 190 | d = self.disks[p['disk_name']] | ||
| 191 | d['numpart'] += 1 | ||
| 192 | d['ptable_format'] = ptable_format | ||
| 193 | |||
| 194 | if d['numpart'] == 1: | ||
| 195 | if ptable_format == "msdos": | ||
| 196 | overhead = MBR_OVERHEAD | ||
| 197 | else: | ||
| 198 | overhead = GPT_OVERHEAD | ||
| 199 | |||
| 200 | # Skip one sector required for the partitioning scheme overhead | ||
| 201 | d['offset'] += overhead | ||
| 202 | # Steal few sectors from the first partition to offset for the | ||
| 203 | # partitioning overhead | ||
| 204 | p['size'] -= overhead | ||
| 205 | |||
| 206 | if p['align']: | ||
| 207 | # If not first partition and we do have alignment set we need | ||
| 208 | # to align the partition. | ||
| 209 | # FIXME: This leaves a empty spaces to the disk. To fill the | ||
| 210 | # gaps we could enlargea the previous partition? | ||
| 211 | |||
| 212 | # Calc how much the alignment is off. | ||
| 213 | align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size) | ||
| 214 | # We need to move forward to the next alignment point | ||
| 215 | align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors | ||
| 216 | |||
| 217 | msger.debug("Realignment for %s%s with %s sectors, original" | ||
| 218 | " offset %s, target alignment is %sK." % | ||
| 219 | (p['disk_name'], d['numpart'], align_sectors, | ||
| 220 | d['offset'], p['align'])) | ||
| 221 | |||
| 222 | # increase the offset so we actually start the partition on right alignment | ||
| 223 | d['offset'] += align_sectors | ||
| 224 | |||
| 225 | p['start'] = d['offset'] | ||
| 226 | d['offset'] += p['size'] | ||
| 227 | |||
| 228 | p['type'] = 'primary' | ||
| 229 | p['num'] = d['numpart'] | ||
| 230 | |||
| 231 | if d['ptable_format'] == "msdos": | ||
| 232 | if d['numpart'] > 2: | ||
| 233 | # Every logical partition requires an additional sector for | ||
| 234 | # the EBR, so steal the last sector from the end of each | ||
| 235 | # partition starting from the 3rd one for the EBR. This | ||
| 236 | # will make sure the logical partitions are aligned | ||
| 237 | # correctly. | ||
| 238 | p['size'] -= 1 | ||
| 239 | |||
| 240 | if d['numpart'] > 3: | ||
| 241 | p['type'] = 'logical' | ||
| 242 | p['num'] = d['numpart'] + 1 | ||
| 243 | |||
| 244 | d['partitions'].append(n) | ||
| 245 | msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d " | ||
| 246 | "sectors (%d bytes)." \ | ||
| 247 | % (p['mountpoint'], p['disk_name'], p['num'], | ||
| 248 | p['start'], p['start'] + p['size'] - 1, | ||
| 249 | p['size'], p['size'] * self.sector_size)) | ||
| 250 | |||
| 251 | # Once all the partitions have been layed out, we can calculate the | ||
| 252 | # minumim disk sizes. | ||
| 253 | for disk_name, d in self.disks.items(): | ||
| 254 | d['min_size'] = d['offset'] | ||
| 255 | if d['ptable_format'] == 'gpt': | ||
| 256 | # Account for the backup partition table at the end of the disk | ||
| 257 | d['min_size'] += GPT_OVERHEAD | ||
| 258 | |||
| 259 | d['min_size'] *= self.sector_size | ||
| 260 | |||
| 261 | def __run_parted(self, args): | ||
| 262 | """ Run parted with arguments specified in the 'args' list. """ | ||
| 263 | |||
| 264 | args.insert(0, self.parted) | ||
| 265 | msger.debug(args) | ||
| 266 | |||
| 267 | rc, out = runner.runtool(args, catch = 3) | ||
| 268 | out = out.strip() | ||
| 269 | if out: | ||
| 270 | msger.debug('"parted" output: %s' % out) | ||
| 271 | |||
| 272 | if rc != 0: | ||
| 273 | # We don't throw exception when return code is not 0, because | ||
| 274 | # parted always fails to reload part table with loop devices. This | ||
| 275 | # prevents us from distinguishing real errors based on return | ||
| 276 | # code. | ||
| 277 | msger.debug("WARNING: parted returned '%s' instead of 0" % rc) | ||
| 278 | |||
| 279 | def __create_partition(self, device, parttype, fstype, start, size): | ||
| 280 | """ Create a partition on an image described by the 'device' object. """ | ||
| 281 | |||
| 282 | # Start is included to the size so we need to substract one from the end. | ||
| 283 | end = start + size - 1 | ||
| 284 | msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" % | ||
| 285 | (parttype, start, end, size)) | ||
| 286 | |||
| 287 | args = ["-s", device, "unit", "s", "mkpart", parttype] | ||
| 288 | if fstype: | ||
| 289 | args.extend([fstype]) | ||
| 290 | args.extend(["%d" % start, "%d" % end]) | ||
| 291 | |||
| 292 | return self.__run_parted(args) | ||
| 293 | |||
| 294 | def __format_disks(self): | ||
| 295 | self.layout_partitions() | ||
| 296 | |||
| 297 | if self.skipformat: | ||
| 298 | msger.debug("Skipping disk format, because skipformat flag is set.") | ||
| 299 | return | ||
| 300 | |||
| 301 | for dev in self.disks.keys(): | ||
| 302 | d = self.disks[dev] | ||
| 303 | msger.debug("Initializing partition table for %s" % \ | ||
| 304 | (d['disk'].device)) | ||
| 305 | self.__run_parted(["-s", d['disk'].device, "mklabel", | ||
| 306 | d['ptable_format']]) | ||
| 307 | |||
| 308 | msger.debug("Creating partitions") | ||
| 309 | |||
| 310 | for p in self.partitions: | ||
| 311 | d = self.disks[p['disk_name']] | ||
| 312 | if d['ptable_format'] == "msdos" and p['num'] == 5: | ||
| 313 | # The last sector of the 3rd partition was reserved for the EBR | ||
| 314 | # of the first _logical_ partition. This is why the extended | ||
| 315 | # partition should start one sector before the first logical | ||
| 316 | # partition. | ||
| 317 | self.__create_partition(d['disk'].device, "extended", | ||
| 318 | None, p['start'] - 1, | ||
| 319 | d['offset'] - p['start']) | ||
| 320 | |||
| 321 | if p['fstype'] == "swap": | ||
| 322 | parted_fs_type = "linux-swap" | ||
| 323 | elif p['fstype'] == "vfat": | ||
| 324 | parted_fs_type = "fat32" | ||
| 325 | elif p['fstype'] == "msdos": | ||
| 326 | parted_fs_type = "fat16" | ||
| 327 | else: | ||
| 328 | # Type for ext2/ext3/ext4/btrfs | ||
| 329 | parted_fs_type = "ext2" | ||
| 330 | |||
| 331 | # Boot ROM of OMAP boards require vfat boot partition to have an | ||
| 332 | # even number of sectors. | ||
| 333 | if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \ | ||
| 334 | and p['size'] % 2: | ||
| 335 | msger.debug("Substracting one sector from '%s' partition to " \ | ||
| 336 | "get even number of sectors for the partition" % \ | ||
| 337 | p['mountpoint']) | ||
| 338 | p['size'] -= 1 | ||
| 339 | |||
| 340 | self.__create_partition(d['disk'].device, p['type'], | ||
| 341 | parted_fs_type, p['start'], p['size']) | ||
| 342 | |||
| 343 | if p['boot']: | ||
| 344 | if d['ptable_format'] == 'gpt': | ||
| 345 | flag_name = "legacy_boot" | ||
| 346 | else: | ||
| 347 | flag_name = "boot" | ||
| 348 | msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \ | ||
| 349 | (flag_name, p['num'], d['disk'].device)) | ||
| 350 | self.__run_parted(["-s", d['disk'].device, "set", | ||
| 351 | "%d" % p['num'], flag_name, "on"]) | ||
| 352 | |||
| 353 | # If the partition table format is "gpt", find out PARTUUIDs for all | ||
| 354 | # the partitions. And if users specified custom parition type UUIDs, | ||
| 355 | # set them. | ||
| 356 | for disk_name, disk in self.disks.items(): | ||
| 357 | if disk['ptable_format'] != 'gpt': | ||
| 358 | continue | ||
| 359 | |||
| 360 | pnum = 0 | ||
| 361 | gpt_parser = GptParser(d['disk'].device, SECTOR_SIZE) | ||
| 362 | # Iterate over all GPT partitions on this disk | ||
| 363 | for entry in gpt_parser.get_partitions(): | ||
| 364 | pnum += 1 | ||
| 365 | # Find the matching partition in the 'self.partitions' list | ||
| 366 | for n in d['partitions']: | ||
| 367 | p = self.partitions[n] | ||
| 368 | if p['num'] == pnum: | ||
| 369 | # Found, fetch PARTUUID (partition's unique ID) | ||
| 370 | p['partuuid'] = entry['part_uuid'] | ||
| 371 | msger.debug("PARTUUID for partition %d on disk '%s' " \ | ||
| 372 | "(mount point '%s') is '%s'" % (pnum, \ | ||
| 373 | disk_name, p['mountpoint'], p['partuuid'])) | ||
| 374 | if p['part_type']: | ||
| 375 | entry['type_uuid'] = p['part_type'] | ||
| 376 | msger.debug("Change type of partition %d on disk " \ | ||
| 377 | "'%s' (mount point '%s') to '%s'" % \ | ||
| 378 | (pnum, disk_name, p['mountpoint'], | ||
| 379 | p['part_type'])) | ||
| 380 | gpt_parser.change_partition(entry) | ||
| 381 | |||
| 382 | del gpt_parser | ||
| 383 | |||
    def __map_partitions(self):
        """ Create device-mapper nodes for every partition of every disk
        using kpartx, and record the resulting device node in each
        partition's 'device' field. Compat symlinks named like the parent
        loop device + partition number are created for grub. Raises
        MountError when kpartx fails or reports an unexpected partition
        count. Ensures the dm_snapshot kernel module is loaded first. """
        load_module("dm_snapshot")

        for dev in self.disks.keys():
            d = self.disks[dev]
            if d['mapped']:
                continue

            # First a dry-run listing ("-l") to learn the mapping names
            msger.debug("Running kpartx on %s" % d['disk'].device )
            rc, kpartxOutput = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
            kpartxOutput = kpartxOutput.splitlines()

            if rc != 0:
                raise MountError("Failed to query partition mapping for '%s'" %
                                 d['disk'].device)

            # Strip leading verbose chatter: real mapping lines start with
            # the "loopXpY" device name
            i = 0
            while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
                i = i + 1
            kpartxOutput = kpartxOutput[i:]

            # Make sure kpartx reported the right count of partitions
            if len(kpartxOutput) != d['numpart']:
                # If this disk has more than 3 partitions, then in case of MBR
                # paritions there is an extended parition. Different versions
                # of kpartx behave differently WRT the extended partition -
                # some map it, some ignore it. This is why we do the below hack
                # - if kpartx reported one more partition and the partition
                # table type is "msdos" and the amount of partitions is more
                # than 3, we just assume kpartx mapped the extended parition
                # and we remove it.
                if len(kpartxOutput) == d['numpart'] + 1 \
                   and d['ptable_format'] == 'msdos' and len(kpartxOutput) > 3:
                    kpartxOutput.pop(3)
                else:
                    raise MountError("Unexpected number of partitions from " \
                                     "kpartx: %d != %d" % \
                                     (len(kpartxOutput), d['numpart']))

            for i in range(len(kpartxOutput)):
                line = kpartxOutput[i]
                newdev = line.split()[0]
                mapperdev = "/dev/mapper/" + newdev
                # NOTE(review): only the last character of the kpartx name is
                # appended, so this presumably breaks for partition numbers
                # >= 10 - confirm against expected partition counts
                loopdev = d['disk'].device + newdev[-1]

                msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
                pnum = d['partitions'][i]
                self.partitions[pnum]['device'] = loopdev

                # grub's install wants partitions to be named
                # to match their parent device + partition num
                # kpartx doesn't work like this, so we add compat
                # symlinks to point to /dev/mapper
                if os.path.lexists(loopdev):
                    os.unlink(loopdev)
                os.symlink(mapperdev, loopdev)

            # Now create the mappings for real ("-a")
            msger.debug("Adding partx mapping for %s" % d['disk'].device)
            rc = runner.show([self.kpartx, "-v", "-a", d['disk'].device])

            if rc != 0:
                # Make sure that the device maps are also removed on error case.
                # The d['mapped'] isn't set to True if the kpartx fails so
                # failed mapping will not be cleaned on cleanup either.
                runner.quiet([self.kpartx, "-d", d['disk'].device])
                raise MountError("Failed to map partitions for '%s'" %
                                 d['disk'].device)

            # FIXME: wait for multipath device setup. The original comment
            # said "wait 10ms" but the call sleeps 10 *seconds* - the value
            # and the intent disagree; confirm which one is intended.
            import time
            time.sleep(10)
            d['mapped'] = True
| 459 | |||
| 460 | def __unmap_partitions(self): | ||
| 461 | for dev in self.disks.keys(): | ||
| 462 | d = self.disks[dev] | ||
| 463 | if not d['mapped']: | ||
| 464 | continue | ||
| 465 | |||
| 466 | msger.debug("Removing compat symlinks") | ||
| 467 | for pnum in d['partitions']: | ||
| 468 | if self.partitions[pnum]['device'] != None: | ||
| 469 | os.unlink(self.partitions[pnum]['device']) | ||
| 470 | self.partitions[pnum]['device'] = None | ||
| 471 | |||
| 472 | msger.debug("Unmapping %s" % d['disk'].device) | ||
| 473 | rc = runner.quiet([self.kpartx, "-d", d['disk'].device]) | ||
| 474 | if rc != 0: | ||
| 475 | raise MountError("Failed to unmap partitions for '%s'" % | ||
| 476 | d['disk'].device) | ||
| 477 | |||
| 478 | d['mapped'] = False | ||
| 479 | |||
| 480 | def __calculate_mountorder(self): | ||
| 481 | msger.debug("Calculating mount order") | ||
| 482 | for p in self.partitions: | ||
| 483 | if p['mountpoint']: | ||
| 484 | self.mountOrder.append(p['mountpoint']) | ||
| 485 | self.unmountOrder.append(p['mountpoint']) | ||
| 486 | |||
| 487 | self.mountOrder.sort() | ||
| 488 | self.unmountOrder.sort() | ||
| 489 | self.unmountOrder.reverse() | ||
| 490 | |||
| 491 | def cleanup(self): | ||
| 492 | Mount.cleanup(self) | ||
| 493 | if self.disks: | ||
| 494 | self.__unmap_partitions() | ||
| 495 | for dev in self.disks.keys(): | ||
| 496 | d = self.disks[dev] | ||
| 497 | try: | ||
| 498 | d['disk'].cleanup() | ||
| 499 | except: | ||
| 500 | pass | ||
| 501 | |||
| 502 | def unmount(self): | ||
| 503 | self.__unmount_subvolumes() | ||
| 504 | for mp in self.unmountOrder: | ||
| 505 | if mp == 'swap': | ||
| 506 | continue | ||
| 507 | p = None | ||
| 508 | for p1 in self.partitions: | ||
| 509 | if p1['mountpoint'] == mp: | ||
| 510 | p = p1 | ||
| 511 | break | ||
| 512 | |||
| 513 | if p['mount'] != None: | ||
| 514 | try: | ||
| 515 | # Create subvolume snapshot here | ||
| 516 | if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created: | ||
| 517 | self.__create_subvolume_snapshots(p, p["mount"]) | ||
| 518 | p['mount'].cleanup() | ||
| 519 | except: | ||
| 520 | pass | ||
| 521 | p['mount'] = None | ||
| 522 | |||
| 523 | # Only for btrfs | ||
| 524 | def __get_subvolume_id(self, rootpath, subvol): | ||
| 525 | if not self.btrfscmd: | ||
| 526 | self.btrfscmd=find_binary_path("btrfs") | ||
| 527 | argv = [ self.btrfscmd, "subvolume", "list", rootpath ] | ||
| 528 | |||
| 529 | rc, out = runner.runtool(argv) | ||
| 530 | msger.debug(out) | ||
| 531 | |||
| 532 | if rc != 0: | ||
| 533 | raise MountError("Failed to get subvolume id from %s', return code: %d." % (rootpath, rc)) | ||
| 534 | |||
| 535 | subvolid = -1 | ||
| 536 | for line in out.splitlines(): | ||
| 537 | if line.endswith(" path %s" % subvol): | ||
| 538 | subvolid = line.split()[1] | ||
| 539 | if not subvolid.isdigit(): | ||
| 540 | raise MountError("Invalid subvolume id: %s" % subvolid) | ||
| 541 | subvolid = int(subvolid) | ||
| 542 | break | ||
| 543 | return subvolid | ||
| 544 | |||
| 545 | def __create_subvolume_metadata(self, p, pdisk): | ||
| 546 | if len(self.subvolumes) == 0: | ||
| 547 | return | ||
| 548 | |||
| 549 | argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ] | ||
| 550 | rc, out = runner.runtool(argv) | ||
| 551 | msger.debug(out) | ||
| 552 | |||
| 553 | if rc != 0: | ||
| 554 | raise MountError("Failed to get subvolume id from %s', return code: %d." % (pdisk.mountdir, rc)) | ||
| 555 | |||
| 556 | subvolid_items = out.splitlines() | ||
| 557 | subvolume_metadata = "" | ||
| 558 | for subvol in self.subvolumes: | ||
| 559 | for line in subvolid_items: | ||
| 560 | if line.endswith(" path %s" % subvol["subvol"]): | ||
| 561 | subvolid = line.split()[1] | ||
| 562 | if not subvolid.isdigit(): | ||
| 563 | raise MountError("Invalid subvolume id: %s" % subvolid) | ||
| 564 | |||
| 565 | subvolid = int(subvolid) | ||
| 566 | opts = subvol["fsopts"].split(",") | ||
| 567 | for opt in opts: | ||
| 568 | if opt.strip().startswith("subvol="): | ||
| 569 | opts.remove(opt) | ||
| 570 | break | ||
| 571 | fsopts = ",".join(opts) | ||
| 572 | subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts) | ||
| 573 | |||
| 574 | if subvolume_metadata: | ||
| 575 | fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w") | ||
| 576 | fd.write(subvolume_metadata) | ||
| 577 | fd.close() | ||
| 578 | |||
| 579 | def __get_subvolume_metadata(self, p, pdisk): | ||
| 580 | subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir | ||
| 581 | if not os.path.exists(subvolume_metadata_file): | ||
| 582 | return | ||
| 583 | |||
| 584 | fd = open(subvolume_metadata_file, "r") | ||
| 585 | content = fd.read() | ||
| 586 | fd.close() | ||
| 587 | |||
| 588 | for line in content.splitlines(): | ||
| 589 | items = line.split("\t") | ||
| 590 | if items and len(items) == 4: | ||
| 591 | self.subvolumes.append({'size': 0, # In sectors | ||
| 592 | 'mountpoint': items[2], # Mount relative to chroot | ||
| 593 | 'fstype': "btrfs", # Filesystem type | ||
| 594 | 'fsopts': items[3] + ",subvol=%s" % items[1], # Filesystem mount options | ||
| 595 | 'disk_name': p['disk_name'], # physical disk name holding partition | ||
| 596 | 'device': None, # kpartx device node for partition | ||
| 597 | 'mount': None, # Mount object | ||
| 598 | 'subvol': items[1], # Subvolume name | ||
| 599 | 'boot': False, # Bootable flag | ||
| 600 | 'mounted': False # Mount flag | ||
| 601 | }) | ||
| 602 | |||
| 603 | def __create_subvolumes(self, p, pdisk): | ||
| 604 | """ Create all the subvolumes. """ | ||
| 605 | |||
| 606 | for subvol in self.subvolumes: | ||
| 607 | argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]] | ||
| 608 | |||
| 609 | rc = runner.show(argv) | ||
| 610 | if rc != 0: | ||
| 611 | raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], rc)) | ||
| 612 | |||
| 613 | # Set default subvolume, subvolume for "/" is default | ||
| 614 | subvol = None | ||
| 615 | for subvolume in self.subvolumes: | ||
| 616 | if subvolume["mountpoint"] == "/" and p['disk_name'] == subvolume['disk_name']: | ||
| 617 | subvol = subvolume | ||
| 618 | break | ||
| 619 | |||
| 620 | if subvol: | ||
| 621 | # Get default subvolume id | ||
| 622 | subvolid = self. __get_subvolume_id(pdisk.mountdir, subvol["subvol"]) | ||
| 623 | # Set default subvolume | ||
| 624 | if subvolid != -1: | ||
| 625 | rc = runner.show([ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir]) | ||
| 626 | if rc != 0: | ||
| 627 | raise MountError("Failed to set default subvolume id: %d', return code: %d." % (subvolid, rc)) | ||
| 628 | |||
| 629 | self.__create_subvolume_metadata(p, pdisk) | ||
| 630 | |||
| 631 | def __mount_subvolumes(self, p, pdisk): | ||
| 632 | if self.skipformat: | ||
| 633 | # Get subvolume info | ||
| 634 | self.__get_subvolume_metadata(p, pdisk) | ||
| 635 | # Set default mount options | ||
| 636 | if len(self.subvolumes) != 0: | ||
| 637 | for subvol in self.subvolumes: | ||
| 638 | if subvol["mountpoint"] == p["mountpoint"] == "/": | ||
| 639 | opts = subvol["fsopts"].split(",") | ||
| 640 | for opt in opts: | ||
| 641 | if opt.strip().startswith("subvol="): | ||
| 642 | opts.remove(opt) | ||
| 643 | break | ||
| 644 | pdisk.fsopts = ",".join(opts) | ||
| 645 | break | ||
| 646 | |||
| 647 | if len(self.subvolumes) == 0: | ||
| 648 | # Return directly if no subvolumes | ||
| 649 | return | ||
| 650 | |||
| 651 | # Remount to make default subvolume mounted | ||
| 652 | rc = runner.show([self.umountcmd, pdisk.mountdir]) | ||
| 653 | if rc != 0: | ||
| 654 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
| 655 | |||
| 656 | rc = runner.show([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir]) | ||
| 657 | if rc != 0: | ||
| 658 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
| 659 | |||
| 660 | for subvol in self.subvolumes: | ||
| 661 | if subvol["mountpoint"] == "/": | ||
| 662 | continue | ||
| 663 | subvolid = self. __get_subvolume_id(pdisk.mountdir, subvol["subvol"]) | ||
| 664 | if subvolid == -1: | ||
| 665 | msger.debug("WARNING: invalid subvolume %s" % subvol["subvol"]) | ||
| 666 | continue | ||
| 667 | # Replace subvolume name with subvolume ID | ||
| 668 | opts = subvol["fsopts"].split(",") | ||
| 669 | for opt in opts: | ||
| 670 | if opt.strip().startswith("subvol="): | ||
| 671 | opts.remove(opt) | ||
| 672 | break | ||
| 673 | |||
| 674 | opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]]) | ||
| 675 | fsopts = ",".join(opts) | ||
| 676 | subvol['fsopts'] = fsopts | ||
| 677 | mountpoint = self.mountdir + subvol['mountpoint'] | ||
| 678 | makedirs(mountpoint) | ||
| 679 | rc = runner.show([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint]) | ||
| 680 | if rc != 0: | ||
| 681 | raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint)) | ||
| 682 | subvol["mounted"] = True | ||
| 683 | |||
| 684 | def __unmount_subvolumes(self): | ||
| 685 | """ It may be called multiple times, so we need to chekc if it is still mounted. """ | ||
| 686 | for subvol in self.subvolumes: | ||
| 687 | if subvol["mountpoint"] == "/": | ||
| 688 | continue | ||
| 689 | if not subvol["mounted"]: | ||
| 690 | continue | ||
| 691 | mountpoint = self.mountdir + subvol['mountpoint'] | ||
| 692 | rc = runner.show([self.umountcmd, mountpoint]) | ||
| 693 | if rc != 0: | ||
| 694 | raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint)) | ||
| 695 | subvol["mounted"] = False | ||
| 696 | |||
| 697 | def __create_subvolume_snapshots(self, p, pdisk): | ||
| 698 | import time | ||
| 699 | |||
| 700 | if self.snapshot_created: | ||
| 701 | return | ||
| 702 | |||
| 703 | # Remount with subvolid=0 | ||
| 704 | rc = runner.show([self.umountcmd, pdisk.mountdir]) | ||
| 705 | if rc != 0: | ||
| 706 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
| 707 | if pdisk.fsopts: | ||
| 708 | mountopts = pdisk.fsopts + ",subvolid=0" | ||
| 709 | else: | ||
| 710 | mountopts = "subvolid=0" | ||
| 711 | rc = runner.show([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir]) | ||
| 712 | if rc != 0: | ||
| 713 | raise MountError("Failed to umount %s" % pdisk.mountdir) | ||
| 714 | |||
| 715 | # Create all the subvolume snapshots | ||
| 716 | snapshotts = time.strftime("%Y%m%d-%H%M") | ||
| 717 | for subvol in self.subvolumes: | ||
| 718 | subvolpath = pdisk.mountdir + "/" + subvol["subvol"] | ||
| 719 | snapshotpath = subvolpath + "_%s-1" % snapshotts | ||
| 720 | rc = runner.show([ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ]) | ||
| 721 | if rc != 0: | ||
| 722 | raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, rc)) | ||
| 723 | |||
| 724 | self.snapshot_created = True | ||
| 725 | |||
    def mount(self):
        """ Create the disk images, partition and map them, then format
        and mount every partition under self.mountdir in mount order.
        Swap partitions are initialized with mkswap but not mounted.
        For a btrfs "/" partition the subvolumes are created (unless
        'skipformat') and mounted as well. Raises MountError for
        unsupported filesystem types. """
        # Materialize the backing disk images first
        for dev in self.disks.keys():
            d = self.disks[dev]
            d['disk'].create()

        self.__format_disks()
        self.__map_partitions()
        self.__calculate_mountorder()

        for mp in self.mountOrder:
            # Find the partition record for this mount point
            p = None
            for p1 in self.partitions:
                if p1['mountpoint'] == mp:
                    p = p1
                    break

            # Default label: 'platform' for root, last path component otherwise
            if not p['label']:
                if p['mountpoint'] == "/":
                    p['label'] = 'platform'
                else:
                    p['label'] = mp.split('/')[-1]

            if mp == 'swap':
                # Swap is only initialized, never mounted
                import uuid
                p['uuid'] = str(uuid.uuid1())
                runner.show([self.mkswap,
                             '-L', p['label'],
                             '-U', p['uuid'],
                             p['device']])
                continue

            # Only the root mount directory is removed on cleanup
            rmmountdir = False
            if p['mountpoint'] == "/":
                rmmountdir = True
            # Pick the DiskMount implementation matching the filesystem
            if p['fstype'] == "vfat" or p['fstype'] == "msdos":
                myDiskMount = VfatDiskMount
            elif p['fstype'] in ("ext2", "ext3", "ext4"):
                myDiskMount = ExtDiskMount
            elif p['fstype'] == "btrfs":
                myDiskMount = BtrfsDiskMount
            else:
                raise MountError("Fail to support file system " + p['fstype'])

            # Mount the btrfs top-level volume unless options say otherwise
            if p['fstype'] == "btrfs" and not p['fsopts']:
                p['fsopts'] = "subvolid=0"

            pdisk = myDiskMount(RawDisk(p['size'] * self.sector_size, p['device']),
                                self.mountdir + p['mountpoint'],
                                p['fstype'],
                                4096,
                                p['label'],
                                rmmountdir,
                                self.skipformat,
                                fsopts = p['fsopts'])
            pdisk.mount(pdisk.fsopts)
            # Root btrfs gets its subvolumes created and mounted here
            if p['fstype'] == "btrfs" and p['mountpoint'] == "/":
                if not self.skipformat:
                    self.__create_subvolumes(p, pdisk)
                self.__mount_subvolumes(p, pdisk)
            p['mount'] = pdisk
            p['uuid'] = pdisk.uuid
| 787 | |||
| 788 | def resparse(self, size = None): | ||
| 789 | # Can't re-sparse a disk image - too hard | ||
| 790 | pass | ||
diff --git a/scripts/lib/mic/utils/proxy.py b/scripts/lib/mic/utils/proxy.py new file mode 100644 index 0000000000..91451a2d01 --- /dev/null +++ b/scripts/lib/mic/utils/proxy.py | |||
| @@ -0,0 +1,183 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2010, 2011 Intel, Inc. | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify it | ||
| 6 | # under the terms of the GNU General Public License as published by the Free | ||
| 7 | # Software Foundation; version 2 of the License | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, but | ||
| 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 12 | # for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | |||
| 18 | import os | ||
| 19 | import urlparse | ||
| 20 | |||
# Module-level proxy state, populated by set_proxies()/_set_proxies().
_my_proxies = {}        # scheme (e.g. "http") -> proxy server URL
_my_noproxy = None      # raw no_proxy string, e.g. "localhost,.example.com"
_my_noproxy_list = []   # parsed no_proxy entries, built by _set_noproxy_list()
| 24 | |||
def set_proxy_environ():
    """Export the cached proxy settings into os.environ.

    Writes one "<scheme>_proxy" variable per cached proxy and, when a
    no_proxy string is cached, "no_proxy" as well.  Does nothing when no
    proxies have been configured.
    """
    global _my_noproxy, _my_proxies
    if not _my_proxies:
        return
    for scheme, url in _my_proxies.items():
        os.environ[scheme + "_proxy"] = url
    if _my_noproxy:
        os.environ["no_proxy"] = _my_noproxy
| 34 | |||
def unset_proxy_environ():
    """Remove every *_proxy variable, in both cases, from os.environ."""
    for name in ('http_proxy',
                 'https_proxy',
                 'ftp_proxy',
                 'all_proxy'):
        # Delete both the lower-case and upper-case variants if present.
        for variant in (name, name.upper()):
            if variant in os.environ:
                del os.environ[variant]
| 46 | |||
def _set_proxies(proxy = None, no_proxy = None):
    """Populate the module proxy cache (scheme -> proxy URL) and the raw
    no_proxy string, from the arguments or from the environment.
    """
    global _my_noproxy, _my_proxies
    _my_proxies = {}
    _my_noproxy = None

    if proxy or no_proxy:
        candidates = []
        if proxy:
            candidates.append(("http_proxy", proxy))
        if no_proxy:
            candidates.append(("no_proxy", no_proxy))
    else:
        # Nothing passed in: fall back to the process environment.
        candidates = os.environ.items()

    # Remove proxy env variables, urllib2 can't handle them correctly
    unset_proxy_environ()

    for name, value in candidates:
        name = name.lower()
        if value and name[-6:] == '_proxy':
            if name[0:2] == "no":
                _my_noproxy = value
            else:
                _my_proxies[name[:-6]] = value
| 74 | |||
| 75 | def _ip_to_int(ip): | ||
| 76 | ipint=0 | ||
| 77 | shift=24 | ||
| 78 | for dec in ip.split("."): | ||
| 79 | ipint |= int(dec) << shift | ||
| 80 | shift -= 8 | ||
| 81 | return ipint | ||
| 82 | |||
| 83 | def _int_to_ip(val): | ||
| 84 | ipaddr="" | ||
| 85 | shift=0 | ||
| 86 | for i in range(4): | ||
| 87 | dec = val >> shift | ||
| 88 | dec &= 0xff | ||
| 89 | ipaddr = ".%d%s" % (dec, ipaddr) | ||
| 90 | shift += 8 | ||
| 91 | return ipaddr[1:] | ||
| 92 | |||
| 93 | def _isip(host): | ||
| 94 | if host.replace(".", "").isdigit(): | ||
| 95 | return True | ||
| 96 | return False | ||
| 97 | |||
def _set_noproxy_list():
    """Parse the cached no_proxy string into _my_noproxy_list.

    Each resulting entry is a dict with a "match" mode:
      0 - exact hostname match ("needle" is the hostname)
      1 - domain-suffix match (entry starts with '.')
      2 - IP network match ("needle" is the masked network address,
          "netmask" the mask as an int)
    """
    global _my_noproxy, _my_noproxy_list
    _my_noproxy_list = []
    if not _my_noproxy:
        return

    for entry in _my_noproxy.split(","):
        entry = entry.strip()
        if not entry:
            continue

        slash = entry.find("/")
        if entry[0] == '.':
            # Domain suffix, e.g. ".example.com": match at the tail.
            _my_noproxy_list.append({"match":1,"needle":entry})
        elif slash == -1:
            # Plain hostname: must match exactly.
            _my_noproxy_list.append({"match":0,"needle":entry})
        elif slash > 3:
            # "IP/MASK" network; MASK is a prefix length or dotted quad.
            ip = _ip_to_int(entry[0:slash].strip())
            mask = entry[slash + 1:].strip()
            if mask.isdigit():
                # Prefix length -> high-bit mask.
                netmask = ~((1 << (32 - int(mask))) - 1)
            else:
                # Dotted-quad mask.
                netmask = 0
                shift = 24
                for dec in mask.split("."):
                    netmask |= int(dec) << shift
                    shift -= 8
            ip &= netmask
            _my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
| 136 | |||
def _isnoproxy(url):
    """Return True when url's host matches an entry in _my_noproxy_list.

    The host is extracted from the URL netloc with any credentials and
    port stripped, then checked against the parsed no_proxy entries
    (exact hostname, domain suffix, or IP network).
    """
    (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)

    # Strip "user:pass@" credentials from the netloc.
    if '@' in host:
        user_pass, host = host.split('@', 1)

    # Strip a ":port" suffix.
    if ':' in host:
        host, port = host.split(':', 1)

    hostisip = _isip(host)
    for item in _my_noproxy_list:
        # Hostname rules (exact/suffix) never apply to a raw IP address.
        if hostisip and item["match"] <= 1:
            continue

        if item["match"] == 2 and hostisip:
            if (_ip_to_int(host) & item["netmask"]) == item["needle"]:
                return True

        if item["match"] == 0:
            if host == item["needle"]:
                return True

        if item["match"] == 1:
            # Bug fix: a suffix entry like ".example.com" must match at the
            # END of the host.  The old "host.rfind(needle) > 0" test also
            # accepted the needle in the middle of the host (e.g.
            # "a.example.com.evil.org" wrongly bypassed the proxy).
            if host.endswith(item["needle"]):
                return True

    return False
| 164 | |||
def set_proxies(proxy = None, no_proxy = None):
    """Initialize the proxy subsystem.

    Caches the given (or environment-derived) proxy settings, parses the
    no_proxy list, and re-exports the result to os.environ.
    """
    _set_proxies(proxy, no_proxy)
    _set_noproxy_list()
    set_proxy_environ()
| 169 | |||
def get_proxy_for(url):
    """Return the proxy URL to use for *url*, or None.

    file: URLs and hosts matching the no_proxy list get no proxy.
    Otherwise the proxy registered for the URL scheme is returned,
    falling back to the "http" proxy (or None) when none is registered.
    """
    if url.startswith('file:') or _isnoproxy(url):
        return None

    scheme = url[0:url.index(":")]
    if scheme in _my_proxies:
        return _my_proxies[scheme]
    return _my_proxies.get("http")
diff --git a/scripts/lib/mic/utils/rpmmisc.py b/scripts/lib/mic/utils/rpmmisc.py new file mode 100644 index 0000000000..af15763e18 --- /dev/null +++ b/scripts/lib/mic/utils/rpmmisc.py | |||
| @@ -0,0 +1,600 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc. | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify it | ||
| 6 | # under the terms of the GNU General Public License as published by the Free | ||
| 7 | # Software Foundation; version 2 of the License | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, but | ||
| 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 12 | # for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | |||
| 18 | import os | ||
| 19 | import sys | ||
| 20 | import re | ||
| 21 | import rpm | ||
| 22 | |||
| 23 | from mic import msger | ||
| 24 | from mic.utils.errors import CreatorError | ||
| 25 | from mic.utils.proxy import get_proxy_for | ||
| 26 | from mic.utils import runner | ||
| 27 | |||
| 28 | |||
class RPMInstallCallback:
    """ Command line callback class for callbacks from the RPM library.

    An instance's callback() method is handed to rpm's transaction run;
    it opens/closes package file descriptors on request and renders a
    textual progress bar through msger.
    """

    def __init__(self, ts, output=1):
        # ts:     rpm transaction set, used to (re)read package headers.
        # output: when true-ish, progress messages are printed via msger.
        self.output = output
        self.callbackfilehandles = {}   # handle string -> open fd
        self.total_actions = 0
        self.total_installed = 0
        self.installed_pkg_names = []
        self.total_removed = 0
        self.mark = "+"                 # progress-bar fill character
        self.marks = 40                 # progress-bar width budget
        self.lastmsg = None
        self.tsInfo = None # this needs to be set for anything else to work
        self.ts = ts
        self.filelog = False
        self.logString = []
        self.headmsg = "Installing"

    def _dopkgtup(self, hdr):
        # Build the (name, arch, epoch, version, release) tuple; a missing
        # epoch is normalized to the string '0'.
        tmpepoch = hdr['epoch']
        if tmpepoch is None: epoch = '0'
        else: epoch = str(tmpepoch)

        return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])

    def _makeHandle(self, hdr):
        # Unique key ("epoch:name.version-release-arch") used to track the
        # file descriptor opened for a package.
        handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
                                     hdr['release'], hdr['arch'])

        return handle

    def _localprint(self, msg):
        # Print only when output is enabled.
        if self.output:
            msger.info(msg)

    def _makefmt(self, percent, progress = True):
        # Build the progress-line format string:
        # "<head>: <pkg> <bar> [done/total]"; a leading \r keeps the bar
        # updating in place while progress is ongoing.
        l = len(str(self.total_actions))
        size = "%s.%s" % (l, l)
        fmt_done = "[%" + size + "s/%" + size + "s]"
        done = fmt_done % (self.total_installed + self.total_removed,
                           self.total_actions)
        # The counter eats into the bar's width budget.
        marks = self.marks - (2 * l)
        width = "%s.%s" % (marks, marks)
        fmt_bar = "%-" + width + "s"
        if progress:
            bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
            fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
        else:
            bar = fmt_bar % (self.mark * marks, )
            fmt = " %-10.10s: %-20.20s " + bar + " " + done
        return fmt

    def _logPkgString(self, hdr):
        """return nice representation of the package for the log"""
        (n,a,e,v,r) = self._dopkgtup(hdr)
        # Epoch '0' is omitted from the rendered string.
        if e == '0':
            pkg = '%s.%s %s-%s' % (n, a, v, r)
        else:
            pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)

        return pkg

    def callback(self, what, bytes, total, h, user):
        # RPM transaction callback entry point; dispatches on the
        # rpm.RPMCALLBACK_* event in `what`.  For INST_OPEN_FILE it must
        # return an open fd for the package file.
        if what == rpm.RPMCALLBACK_TRANS_START:
            # bytes == 6 marks the install phase; total is the action count.
            if bytes == 6:
                self.total_actions = total

        elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
            pass

        elif what == rpm.RPMCALLBACK_TRANS_STOP:
            pass

        elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
            self.lastmsg = None
            hdr = None
            if h is not None:
                # h may be a (header, path) pair or just the path; in the
                # latter case re-read the header from the file itself.
                try:
                    hdr, rpmloc = h
                except:
                    rpmloc = h
                    hdr = readRpmHeader(self.ts, h)

                handle = self._makeHandle(hdr)
                fd = os.open(rpmloc, os.O_RDONLY)
                self.callbackfilehandles[handle]=fd
                if hdr['name'] not in self.installed_pkg_names:
                    self.installed_pkg_names.append(hdr['name'])
                    self.total_installed += 1
                return fd
            else:
                self._localprint("No header - huh?")

        elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
            hdr = None
            if h is not None:
                try:
                    hdr, rpmloc = h
                except:
                    rpmloc = h
                    hdr = readRpmHeader(self.ts, h)

                # Close the fd we opened for this package in INST_OPEN_FILE.
                handle = self._makeHandle(hdr)
                os.close(self.callbackfilehandles[handle])
                fd = 0

                # log stuff
                #pkgtup = self._dopkgtup(hdr)
                self.logString.append(self._logPkgString(hdr))

        elif what == rpm.RPMCALLBACK_INST_PROGRESS:
            if h is not None:
                percent = (self.total_installed*100L)/self.total_actions
                if total > 0:
                    try:
                        hdr, rpmloc = h
                    except:
                        rpmloc = h

                    # Derive a short package name from the rpm file name.
                    m = re.match("(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
                    if m:
                        pkgname = m.group(1)
                    else:
                        pkgname = os.path.basename(rpmloc)
                if self.output:
                    fmt = self._makefmt(percent)
                    msg = fmt % (self.headmsg, pkgname)
                    # Only redraw when the rendered line actually changed.
                    if msg != self.lastmsg:
                        self.lastmsg = msg

                        msger.info(msg)

                    # After the final package, dump the accumulated log.
                    if self.total_installed == self.total_actions:
                        msger.raw('')
                        msger.verbose('\n'.join(self.logString))

        elif what == rpm.RPMCALLBACK_UNINST_START:
            pass

        elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
            pass

        elif what == rpm.RPMCALLBACK_UNINST_STOP:
            self.total_removed += 1

        elif what == rpm.RPMCALLBACK_REPACKAGE_START:
            pass

        elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
            pass

        elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
            pass
| 184 | |||
def readRpmHeader(ts, filename):
    """ Read an rpm header.

    Opens *filename*, reads its header via the transaction set and
    returns it.  The file descriptor is always closed, even when
    hdrFromFdno raises (the original version leaked it on error).
    """
    fd = os.open(filename, os.O_RDONLY)
    try:
        h = ts.hdrFromFdno(fd)
    finally:
        os.close(fd)
    return h
| 192 | |||
def splitFilename(filename):
    """ Pass in a standard style rpm fullname

        Return a name, version, release, epoch, arch, e.g.::
            foo-1.0-1.i386.rpm returns foo, 1.0, 1, i386
            1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
    """
    # Drop a trailing ".rpm" extension.
    if filename[-4:] == '.rpm':
        filename = filename[:-4]

    # Peel fields off the right: ".arch", "-release", "-version".
    dot = filename.rfind('.')
    arch = filename[dot + 1:]

    dash2 = filename[:dot].rfind('-')
    rel = filename[dash2 + 1:dot]

    dash1 = filename[:dash2].rfind('-')
    ver = filename[dash1 + 1:dash2]

    # An optional "epoch:" prefix precedes the name.
    colon = filename.find(':')
    epoch = '' if colon != -1 else ''
    if colon != -1:
        epoch = filename[:colon]

    name = filename[colon + 1:dash1]
    return name, ver, rel, epoch, arch
| 221 | |||
def getCanonX86Arch(arch):
    """Canonicalize an ia32 arch string using /proc/cpuinfo details.

    i586 Geode CPUs become "geode"; i686 AMD CPUs become "athlon"; an
    i686 without cmov is demoted to "i586".  Anything else is returned
    unchanged.
    """
    if arch == "i586":
        cpuinfo = open("/proc/cpuinfo", "r")
        lines = cpuinfo.readlines()
        cpuinfo.close()
        for line in lines:
            # Geode CPUs report themselves as i586 but have their own arch.
            if line.startswith("model name") and line.find("Geode(TM)") != -1:
                return "geode"
        return arch

    # only athlon vs i686 isn't handled with uname currently
    if arch != "i686":
        return arch

    # if we're i686 and AuthenticAMD, then we should be an athlon
    cpuinfo = open("/proc/cpuinfo", "r")
    lines = cpuinfo.readlines()
    cpuinfo.close()
    for line in lines:
        if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
            return "athlon"
        # i686 doesn't guarantee cmov, but we depend on it
        if line.startswith("flags") and line.find("cmov") == -1:
            return "i586"

    return arch
| 248 | |||
def getCanonX86_64Arch(arch):
    """Refine "x86_64" to the vendor-specific name ("amd64"/"ia32e").

    Non-x86_64 arches pass through unchanged; so does x86_64 when the
    vendor cannot be determined from /proc/cpuinfo.
    """
    if arch != "x86_64":
        return arch

    cpuinfo = open("/proc/cpuinfo", "r")
    lines = cpuinfo.readlines()
    cpuinfo.close()

    vendor = None
    for line in lines:
        if line.startswith("vendor_id"):
            vendor = line.split(':')[1]
            break

    if vendor is None:
        return arch
    if "Authentic AMD" in vendor or "AuthenticAMD" in vendor:
        return "amd64"
    if "GenuineIntel" in vendor:
        return "ia32e"
    return arch
| 269 | |||
def getCanonArch():
    """Return the canonical arch name for the running machine."""
    arch = os.uname()[4]

    # The ix86 family needs refinement from cpu model details.
    if len(arch) == 4 and arch[0] == "i" and arch.endswith("86"):
        return getCanonX86Arch(arch)
    if arch == "x86_64":
        return getCanonX86_64Arch(arch)
    return arch
| 280 | |||
# Copy from libsatsolver:poolarch.c, with cleanup
# Maps an arch to the colon-separated list of arches whose packages it can
# install, best (most specific) first.
archPolicies = {
    "x86_64": "x86_64:i686:i586:i486:i386",
    "i686": "i686:i586:i486:i386",
    "i586": "i586:i486:i386",
    "ia64": "ia64:i686:i586:i486:i386",
    "armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
    "armv7thl": "armv7thl:armv7hl",
    "armv7nhl": "armv7nhl:armv7hl",
    "armv7hl": "armv7hl",
    "armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
    "armv5l": "armv5l:armv4tl:armv4l:armv3l",
}

# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {
    "x86_64": ( "athlon", "x86_64", "athlon" ),
}

# from yumUtils.py
# Compatibility chain: each arch maps to the next more generic arch; every
# chain ends at "noarch".  Used by isMultiLibArch()/getBaseArch().
arches = {
    # ia32
    "athlon": "i686",
    "i686": "i586",
    "geode": "i586",
    "i586": "i486",
    "i486": "i386",
    "i386": "noarch",

    # amd64
    "x86_64": "athlon",
    "amd64": "x86_64",
    "ia32e": "x86_64",

    # arm
    "armv7tnhl": "armv7nhl",
    "armv7nhl": "armv7hl",
    "armv7hl": "noarch",
    "armv7l": "armv6l",
    "armv6l": "armv5tejl",
    "armv5tejl": "armv5tel",
    "armv5tel": "noarch",

    #itanium
    "ia64": "noarch",
}
| 330 | |||
def isMultiLibArch(arch=None):
    """returns true if arch is a multilib arch, false if not"""
    if arch is None:
        arch = getCanonArch()

    if arch not in arches: # or we could check if it is noarch
        return False

    # Either the arch itself, or its compat parent, being listed in
    # multilibArches makes it multilib.
    return arch in multilibArches or arches[arch] in multilibArches
| 346 | |||
def getBaseArch():
    """Return the base arch of the running machine (e.g. i386 for i686)."""
    myarch = getCanonArch()
    # Unknown arches are their own base.
    if myarch not in arches:
        return myarch

    if isMultiLibArch(arch=myarch):
        # A multilib arch is its own base unless only its compat parent
        # is the multilib one.
        if myarch in multilibArches:
            return myarch
        else:
            return arches[myarch]

    # Walk the compat chain down to (but not including) "noarch".
    basearch = myarch
    while arches[basearch] != 'noarch':
        basearch = arches[basearch]
    return basearch
| 366 | |||
def checkRpmIntegrity(bin_rpm, package):
    """Quietly verify *package* with `rpm -K --nosignature`; return rc."""
    cmdline = [bin_rpm, "-K", "--nosignature", package]
    return runner.quiet(cmdline)
| 369 | |||
def checkSig(ts, package):
    """ Takes a transaction set and a package, check it's sigs,
    return 0 if they are all fine
    return 1 if the gpg key can't be found
    return 2 if the header is in someway damaged
    return 3 if the key is not trusted
    return 4 if the pkg is not gpg or pgp signed
    """

    value = 0
    # Temporarily enable full signature verification; restored below.
    currentflags = ts.setVSFlags(0)
    fdno = os.open(package, os.O_RDONLY)
    try:
        hdr = ts.hdrFromFdno(fdno)

    except rpm.error, e:
        # NOTE: "availaiable" is a historical rpm typo; both spellings of
        # the "public key not available" message are matched on purpose.
        if str(e) == "public key not availaiable":
            value = 1
        if str(e) == "public key not available":
            value = 1
        if str(e) == "public key not trusted":
            value = 3
        if str(e) == "error reading package header":
            value = 2
    else:
        # Header read fine; 101 from getSigInfo() means "not signed".
        error, siginfo = getSigInfo(hdr)
        if error == 101:
            os.close(fdno)
            del hdr
            value = 4
        else:
            del hdr

    # fdno may already be closed on the value==4 path above.
    try:
        os.close(fdno)
    except OSError:
        pass

    ts.setVSFlags(currentflags) # put things back like they were before
    return value
| 410 | |||
def getSigInfo(hdr):
    """ checks signature from an hdr hand back signature information and/or
        an error code

    Returns (error, (sigtype, sigdate, sigid)); error is 101 when the
    package carries no gpg/pgp signature at all.
    """
    import locale
    locale.setlocale(locale.LC_ALL, 'C')

    # Query whichever signature tag is present, in preference order.
    query = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
    siginfo = hdr.sprintf(query)

    if siginfo == '(none)':
        # Unsigned package: placeholder values plus error code 101.
        return 101, ('MD5', 'None', 'None')

    sigtype, sigdate, sigid = siginfo.split(',')
    return 0, (sigtype, sigdate, sigid)
| 432 | |||
def checkRepositoryEULA(name, repo):
    """ This function is to check the EULA file if provided.
    return True: no EULA or accepted
    return False: user declined the EULA

    Downloads LICENSE.txt from the first baseurl that has one, shows it
    through a pager and asks the user to accept; also shows an optional
    support_info.html afterwards.
    """

    import tempfile
    import shutil
    import urlparse
    import urllib2 as u2
    import httplib
    from mic.utils.errors import CreatorError

    def _check_and_download_url(u2opener, url, savepath):
        # Fetch url into savepath and return savepath; return None when
        # the file simply is not there (404/503 or local ENOENT).
        try:
            if u2opener:
                f = u2opener.open(url)
            else:
                f = u2.urlopen(url)
        except u2.HTTPError, httperror:
            if httperror.code in (404, 503):
                return None
            else:
                raise CreatorError(httperror)
        except OSError, oserr:
            if oserr.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except IOError, oserr:
            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
                return None
            else:
                raise CreatorError(oserr)
        except u2.URLError, err:
            raise CreatorError(err)
        except httplib.HTTPException, e:
            raise CreatorError(e)

        # save to file
        licf = open(savepath, "w")
        licf.write(f.read())
        licf.close()
        f.close()

        return savepath

    def _pager_file(savepath):
        # Display savepath with the first pager that works; fall back to
        # dumping the content through msger and pausing.
        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
        else:
            pagers = ('less', 'more')

        file_showed = False
        for pager in pagers:
            cmd = "%s %s" % (pager, savepath)
            try:
                os.system(cmd)
            except OSError:
                continue
            else:
                file_showed = True
                break

        if not file_showed:
            f = open(savepath)
            msger.raw(f.read())
            f.close()
            msger.pause()

    # when proxy needed, make urllib2 follow it
    proxy = repo.proxy
    proxy_username = repo.proxy_username
    proxy_password = repo.proxy_password

    if not proxy:
        proxy = get_proxy_for(repo.baseurl[0])

    handlers = []
    auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
    u2opener = None
    if proxy:
        # Embed proxy credentials, when given, into the proxy URL.
        if proxy_username:
            proxy_netloc = urlparse.urlsplit(proxy).netloc
            if proxy_password:
                proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
            else:
                proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
        else:
            proxy_url = proxy

        proxy_support = u2.ProxyHandler({'http': proxy_url,
                                         'https': proxy_url,
                                         'ftp': proxy_url})
        handlers.append(proxy_support)

    # download all remote files to one temp dir
    baseurl = None
    repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')

    for url in repo.baseurl:
        tmphandlers = handlers[:]

        (scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
        if scheme not in ("http", "https", "ftp", "ftps", "file"):
            raise CreatorError("Error: invalid url %s" % url)

        # Move credentials embedded in the URL into the basic-auth
        # handler and rebuild a credential-free URL.
        if '@' in host:
            try:
                user_pass, host = host.split('@', 1)
                if ':' in user_pass:
                    user, password = user_pass.split(':', 1)
            except ValueError, e:
                raise CreatorError('Bad URL: %s' % url)

            msger.verbose("adding HTTP auth: %s, XXXXXXXX" %(user))
            auth_handler.add_password(None, host, user, password)
            tmphandlers.append(auth_handler)
            url = scheme + "://" + host + path + parm + query + frag

        if tmphandlers:
            u2opener = u2.build_opener(*tmphandlers)

        # try to download
        repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
        repo_eula_path = _check_and_download_url(
                                u2opener,
                                repo_eula_url,
                                os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
        if repo_eula_path:
            # found
            baseurl = url
            break

    # No LICENSE.txt anywhere: nothing to accept.
    if not baseurl:
        shutil.rmtree(repo_lic_dir) #cleanup
        return True

    # show the license file
    msger.info('For the software packages in this yum repo:')
    msger.info(' %s: %s' % (name, baseurl))
    msger.info('There is an "End User License Agreement" file that need to be checked.')
    msger.info('Please read the terms and conditions outlined in it and answer the followed qustions.')
    msger.pause()

    _pager_file(repo_eula_path)

    # Asking for the "Accept/Decline"
    if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
        msger.warning('Will not install pkgs from this repo.')
        shutil.rmtree(repo_lic_dir) #cleanup
        return False

    # try to find support_info.html for extra infomation
    repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
    repo_info_path = _check_and_download_url(
                            u2opener,
                            repo_info_url,
                            os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
    if repo_info_path:
        msger.info('There is one more file in the repo for additional support information, please read it')
        msger.pause()
        _pager_file(repo_info_path)

    #cleanup
    shutil.rmtree(repo_lic_dir)
    return True
diff --git a/scripts/lib/mic/utils/runner.py b/scripts/lib/mic/utils/runner.py new file mode 100644 index 0000000000..fded3c93fa --- /dev/null +++ b/scripts/lib/mic/utils/runner.py | |||
| @@ -0,0 +1,109 @@ | |||
| 1 | #!/usr/bin/python -tt | ||
| 2 | # | ||
| 3 | # Copyright (c) 2011 Intel, Inc. | ||
| 4 | # | ||
| 5 | # This program is free software; you can redistribute it and/or modify it | ||
| 6 | # under the terms of the GNU General Public License as published by the Free | ||
| 7 | # Software Foundation; version 2 of the License | ||
| 8 | # | ||
| 9 | # This program is distributed in the hope that it will be useful, but | ||
| 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
| 11 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
| 12 | # for more details. | ||
| 13 | # | ||
| 14 | # You should have received a copy of the GNU General Public License along | ||
| 15 | # with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | # Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | |||
| 18 | import os | ||
| 19 | import subprocess | ||
| 20 | |||
| 21 | from mic import msger | ||
| 22 | |||
def runtool(cmdln_or_args, catch=1):
    """ wrapper for most of the subprocess calls
    input:
        cmdln_or_args: can be both args and cmdln str (shell=True)
        catch: 0, quitely run
               1, only STDOUT
               2, only STDERR
               3, both STDOUT and STDERR
    return:
        (rc, output)
        if catch==0: the output will always None
    """

    if catch not in (0, 1, 2, 3):
        # invalid catch selection, will cause exception, that's good
        return None

    # Work out the program name (for error reporting) and whether a shell
    # is needed: a plain command string does, an argv list does not.
    if isinstance(cmdln_or_args, list):
        cmd = cmdln_or_args[0]
        shell = False
    else:
        import shlex
        cmd = shlex.split(cmdln_or_args)[0]
        shell = True

    # Any stream we are not catching is discarded into /dev/null.
    if catch != 3:
        dev_null = os.open("/dev/null", os.O_WRONLY)

    if catch == 0:
        sout = dev_null
        serr = dev_null
    elif catch == 1:
        sout = subprocess.PIPE
        serr = dev_null
    elif catch == 2:
        sout = dev_null
        serr = subprocess.PIPE
    elif catch == 3:
        # Merge stderr into stdout so they come back as one stream.
        sout = subprocess.PIPE
        serr = subprocess.STDOUT

    try:
        p = subprocess.Popen(cmdln_or_args, stdout=sout,
                             stderr=serr, shell=shell)
        (sout, serr) = p.communicate()
        # combine stdout and stderr, filter None out
        out = ''.join(filter(None, [sout, serr]))
    except OSError, e:
        if e.errno == 2:
            # [Errno 2] No such file or directory
            msger.error('Cannot run command: %s, lost dependency?' % cmd)
        else:
            raise # relay
    finally:
        # Close /dev/null whether Popen succeeded or not.
        if catch != 3:
            os.close(dev_null)

    return (p.returncode, out)
| 81 | |||
def show(cmdln_or_args):
    """Run a command capturing stdout+stderr combined, log the command
    line (and any output, boxed) through msger.verbose(), and return
    the command's exit code."""

    ret, output = runtool(cmdln_or_args, catch=3)

    if isinstance(cmdln_or_args, list):
        cmdline = ' '.join(cmdln_or_args)
    else:
        cmdline = cmdln_or_args

    parts = ['running command: "%s"' % cmdline]
    output = output.strip() if output else output
    if output:
        parts.append(', with output::')
        parts.append('\n +----------------')
        for line in output.splitlines():
            parts.append('\n | %s' % line)
        parts.append('\n +----------------')

    msger.verbose(''.join(parts))
    return ret
| 103 | |||
def outs(cmdln_or_args, catch=1):
    """Run a command and return its captured output, stripped of
    surrounding whitespace; the exit code is discarded."""
    _, output = runtool(cmdln_or_args, catch)
    return output.strip()
| 107 | |||
def quiet(cmdln_or_args):
    """Run a command with all output discarded; return its exit code."""
    rc, _ = runtool(cmdln_or_args, catch=0)
    return rc
