"""Functions that read and write gzipped files.

The user of the file doesn't have to worry about the compression,
but random access is not allowed."""

# based on Andrew Kuchling's minigzip.py distributed with the zlib module

import struct, sys, time, os
import zlib
import builtins
import io

__all__ = ["GzipFile","open"]

FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16

READ, WRITE = 1, 2

def U32(i):
    """Return i as an unsigned integer, assuming it fits in 32 bits.
    If it's >= 2GB when viewed as a 32-bit unsigned int, the result
    is still a plain int.
    """
    if i < 0:
        i += 1 << 32
    return i

def LOWU32(i):
    """Return the low-order 32 bits, as a non-negative int"""
    return i & 0xFFFFFFFF

def write32u(output, value):
    # The L format writes the bit pattern correctly whether signed
    # or unsigned.
    output.write(struct.pack("<L", value))

def read32(input):
    return struct.unpack("<I", input.read(4))[0]

def open(filename, mode="rb", compresslevel=9):
    """Shorthand for GzipFile(filename, mode, compresslevel).

    The filename argument is required; mode defaults to 'rb'
    and compresslevel defaults to 9.

    """
    return GzipFile(filename, mode, compresslevel)
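
# A minimal usage sketch for open() (illustrative only; 'example.txt.gz' is a
# hypothetical path, and error handling is omitted):
#
#   out = open("example.txt.gz", "wb")
#   out.write(b"some data")
#   out.close()
#
#   f = open("example.txt.gz", "rb")
#   assert f.read() == b"some data"
#   f.close()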

class GzipFile(io.BufferedIOBase):
    """The GzipFile class simulates most of the methods of a file object with
    the exception of the readinto() and truncate() methods.

    """

    myfileobj = None
    max_read_chunk = 10 * 1024 * 1024   # 10Mb
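
    # Usage sketch (illustrative only): GzipFile can wrap an in-memory stream
    # through the fileobj argument; nothing beyond the constructor arguments
    # documented below is assumed here.
    #
    #   import io
    #   buf = io.BytesIO()
    #   gz = GzipFile(filename="example.txt", mode="wb", fileobj=buf)
    #   gz.write(b"hello world")
    #   gz.close()                    # writes the CRC/size trailer; buf stays open
    #   data = buf.getvalue()         # a complete gzip stream
    #
    #   gz = GzipFile(fileobj=io.BytesIO(data), mode="rb")
    #   assert gz.read() == b"hello world"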

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 1 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression.  The default is 9.

85 86 87 88 89 90 91 92 93
        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

94 95
        """

        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = b""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError("Mode " + mode + " not supported")

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime

        if self.mode == WRITE:
            self._write_gzip_header()

    @property
    def filename(self):
        import warnings
        warnings.warn("use the name attribute", DeprecationWarning, 2)
        if self.mode == WRITE and self.name[-3:] != ".gz":
            return self.name + ".gz"
        return self.name

    def __repr__(self):
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'

    def _init_write(self, filename):
        self.name = filename
        self.crc = zlib.crc32(b"") & 0xffffffff
        self.size = 0
        self.writebuf = []
        self.bufsize = 0

    def _write_gzip_header(self):
        self.fileobj.write(b'\037\213')             # magic header
        self.fileobj.write(b'\010')                 # compression method
        try:
            # RFC 1952 requires the FNAME field to be Latin-1. Do not
            # include filenames that cannot be represented that way.
            fname = os.path.basename(self.name)
            fname = fname.encode('latin-1')
            if fname.endswith(b'.gz'):
                fname = fname[:-3]
        except UnicodeEncodeError:
            fname = b''
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags).encode('latin-1'))
        mtime = self.mtime
        if mtime is None:
            mtime = time.time()
        write32u(self.fileobj, int(mtime))
        self.fileobj.write(b'\002')                 # extra flags (XFL)
        self.fileobj.write(b'\377')                 # OS byte (255 = unknown)
        if fname:
            self.fileobj.write(fname + b'\000')

    def _init_read(self):
        self.crc = zlib.crc32(b"") & 0xffffffff
        self.size = 0

    def _read_gzip_header(self):
        magic = self.fileobj.read(2)
        if magic != b'\037\213':
            raise IOError('Not a gzipped file')
        method = ord(self.fileobj.read(1))
        if method != 8:
            raise IOError('Unknown compression method')
        flag = ord(self.fileobj.read(1))
        self.mtime = read32(self.fileobj)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(2)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s == b'\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s == b'\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC

    def write(self, data):
        if self.mode != WRITE:
            import errno
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError("write() on closed GzipFile object")

        # Convert data type if called by io.BufferedWriter.
        if isinstance(data, memoryview):
            data = data.tobytes()

        if len(data) > 0:
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffff
            self.fileobj.write(self.compress.compress(data))
            self.offset += len(data)

        return len(data)

    def read(self, size=-1):
        if self.mode != READ:
            import errno
            raise IOError(errno.EBADF, "read() on write-only GzipFile object")

        if self.extrasize <= 0 and self.fileobj is None:
            return b''

        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                while True:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize

        offset = self.offset - self.extrastart
        chunk = self.extrabuf[offset: offset + size]
        self.extrasize = self.extrasize - size

        self.offset += size
        return chunk

    def _unread(self, buf):
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)

    def _read(self, size=1024):
        if self.fileobj is None:
            raise EOFError("Reached EOF")

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError("Reached EOF")
            else:
                self.fileobj.seek(pos)   # Return to original position

            self._init_read()
            self._read_gzip_header()
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.

        if buf == b"":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data(uncompress)
            raise EOFError('Reached EOF')

        uncompress = self.decompress.decompress(buf)
        self._add_read_data(uncompress)

        if self.decompress.unused_data != b"":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            self.fileobj.seek(-len(self.decompress.unused_data) + 8, 1)

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True
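
    # Note (illustrative sketch, not part of the API): because _read() starts a
    # new member whenever unused data is left over, several gzip members
    # concatenated in one stream are read back as a single byte string, e.g.
    #
    #   import io
    #   buf = io.BytesIO()
    #   for part in (b"abc", b"def"):
    #       g = GzipFile(fileobj=buf, mode="wb")
    #       g.write(part)
    #       g.close()
    #   assert GzipFile(fileobj=io.BytesIO(buf.getvalue())).read() == b"abcdef"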

    def _add_read_data(self, data):
        self.crc = zlib.crc32(data, self.crc) & 0xffffffff
        offset = self.offset - self.extrastart
        self.extrabuf = self.extrabuf[offset:] + data
        self.extrasize = self.extrasize + len(data)
        self.extrastart = self.offset
        self.size = self.size + len(data)

    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check that the computed CRC and size of the
        # uncompressed data match the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)  # may exceed 2GB
        if crc32 != self.crc:
            raise IOError("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self.crc)))
        elif isize != (self.size & 0xffffffff):
            raise IOError("Incorrect length of data produced")

        # Gzip files can be padded with zeroes and still be valid archives.
        # Consume all zero bytes and set the file position to the first
        # non-zero byte. See http://www.gzip.org/#faq8
        c = b"\x00"
        while c == b"\x00":
            c = self.fileobj.read(1)
        if c:
            self.fileobj.seek(-1, 1)

    @property
    def closed(self):
        return self.fileobj is None

    def close(self):
        if self.fileobj is None:
            return
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32u(self.fileobj, self.crc)
            # self.size may exceed 2GB, or even 4GB
            write32u(self.fileobj, self.size & 0xffffffff)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            self.myfileobj.close()
            self.myfileobj = None

    def flush(self, zlib_mode=zlib.Z_SYNC_FLUSH):
        if self.mode == WRITE:
            # Ensure the compressor's buffer is flushed
            self.fileobj.write(self.compress.flush(zlib_mode))
            self.fileobj.flush()

    def fileno(self):
        """Invoke the underlying file object's fileno() method.

        This will raise AttributeError if the underlying file object
        doesn't support fileno().
        """
        return self.fileobj.fileno()

    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = True
        self.extrabuf = b""
        self.extrasize = 0
        self.extrastart = 0
        self.offset = 0

    def readable(self):
        return self.mode == READ

    def writable(self):
        return self.mode == WRITE

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        if whence:
            if whence == 1:
                offset = self.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            count = offset - self.offset
            chunk = bytes(1024)
            for i in range(count // 1024):
                self.write(chunk)
            self.write(bytes(count % 1024))
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            count = offset - self.offset
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)

        return self.offset

    def readline(self, size=-1):
        if size < 0:
            # Shortcut common case - newline found in buffer.
            offset = self.offset - self.extrastart
            i = self.extrabuf.find(b'\n', offset) + 1
            if i > 0:
                self.extrasize -= i - offset
                self.offset += i - offset
                return self.extrabuf[offset: i]

            size = sys.maxsize
            readsize = self.min_readsize
        else:
            readsize = size
        bufs = []
        while size != 0:
            c = self.read(readsize)
            i = c.find(b'\n')

            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if (size <= i) or (i == -1 and len(c) > size):
                i = size - 1

            if i >= 0 or c == b'':
                bufs.append(c[:i + 1])    # Add portion of last chunk
                self._unread(c[i + 1:])   # Push back rest of chunk
                break

            # Append chunk to list, decrease 'size', and grow 'readsize'
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)
        if readsize > self.min_readsize:
            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
        return b''.join(bufs)   # Return resulting line


def _test():
    # Act like gzip; with -d, act like gunzip.
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    if decompress:
        args = args[1:]
    if not args:
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin.buffer)
                g = sys.stdout.buffer
            else:
                if arg[-3:] != ".gz":
                    print("filename doesn't end in .gz:", repr(arg))
                    continue
                f = open(arg, "rb")
                g = builtins.open(arg[:-3], "wb")
        else:
            if arg == "-":
                f = sys.stdin.buffer
                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout.buffer)
            else:
                f = builtins.open(arg, "rb")
                g = open(arg + ".gz", "wb")
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        if g is not sys.stdout.buffer:
            g.close()
        if f is not sys.stdin.buffer:
            f.close()
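
# Command-line usage sketch (mirrors what _test() above implements; file names
# are placeholders, and the input file is not deleted):
#
#   python gzip.py file.txt           # compresses to file.txt.gz
#   python gzip.py -d file.txt.gz     # decompresses to file.txt
#   python gzip.py -d < in.gz > out   # filter mode via stdin/stdout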

if __name__ == '__main__':
    _test()