"""Performance tests for streaming data into and out of the object database"""

from test.testlib import *
from git.odb.db import *

from array import array
from cStringIO import StringIO
from time import time
import os
import sys
import stat
import random


from lib import (
    TestBigRepoReadOnly
    )


def make_memory_file(size_in_bytes, randomize=False):
    """:return: tuple(size_of_stream, stream)
    :param randomize: try to produce a very random stream"""
    # each 'i' array item occupies 4 bytes (on common platforms), hence the division
    actual_size = size_in_bytes / 4
    producer = xrange(actual_size)
    if randomize:
        producer = list(producer)
        random.shuffle(producer)
    # END randomize
    a = array('i', producer)
    return actual_size*4, StringIO(a.tostring())
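

# A minimal usage sketch (illustrative only, not part of the original module):
# the helper returns the actual byte count alongside the stream, so both can
# be verified independently. The function name is hypothetical.
def _example_make_memory_file():
    size, stream = make_memory_file(1000, randomize=True)
    # 1000 bytes requested -> 250 'i' items -> 1000 bytes produced
    assert size == 1000
    assert len(stream.getvalue()) == size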


class TestObjDBPerformance(TestBigRepoReadOnly):

    large_data_size_bytes = 1000*1000*10        # 10 MB should do it
    moderate_data_size_bytes = 1000*1000*1      # just 1 MB

    @with_bare_rw_repo
    def test_large_data_streaming(self, rwrepo):
        ldb = LooseObjectDB(os.path.join(rwrepo.git_dir, 'objects'))

        for randomize in range(2):
            desc = (randomize and 'random ') or ''
            print >> sys.stderr, "Creating %s data ..." % desc
            st = time()
            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
            elapsed = time() - st
            print >> sys.stderr, "Done (in %f s)" % elapsed

            # writing - due to the compression it will seem faster than it is
            st = time()
            sha = ldb.to_object('blob', size, stream)
            elapsed = time() - st
            assert ldb.has_object(sha)
            fsize_kb = os.path.getsize(ldb.readable_db_object_path(sha)) / 1000

            size_kb = size / 1000
            print >> sys.stderr, "Added %i kB (filesize = %i kB) of %s data to loose odb in %f s ( %f write kB / s)" % (size_kb, fsize_kb, desc, elapsed, size_kb / elapsed)

            # reading all at once
            st = time()
            otype, size, shastream = ldb.object(sha)
            shadata = shastream.read()
            elapsed = time() - st

            stream.seek(0)
            assert shadata == stream.getvalue()
            print >> sys.stderr, "Read %i kB of %s data at once from loose odb in %f s ( %f read kB / s)" % (size_kb, desc, elapsed, size_kb / elapsed)

            # reading in chunks of 512 kB
            cs = 512*1000
            chunks = list()
            st = time()
            otype, size, shastream = ldb.object(sha)
            while True:
                data = shastream.read(cs)
                chunks.append(data)
                if len(data) < cs:
                    break
            # END read in chunks
            elapsed = time() - st

            stream.seek(0)
            assert ''.join(chunks) == stream.getvalue()

            cs_kb = cs / 1000
            print >> sys.stderr, "Read %i kB of %s data in %i kB chunks from loose odb in %f s ( %f read kB / s)" % (size_kb, desc, cs_kb, elapsed, size_kb / elapsed)
        # END for each randomization factor
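

# A minimal standalone sketch of the chunked-read loop timed above, factored
# out for clarity. It assumes only that `stream` exposes read(size); the name
# _read_in_chunks is illustrative and not part of the original module.
def _read_in_chunks(stream, chunk_size=512*1000):
    """Drain `stream` in `chunk_size` slices and return the concatenated data."""
    chunks = list()
    while True:
        data = stream.read(chunk_size)
        chunks.append(data)
        # a short read signals the end of the stream
        if len(data) < chunk_size:
            break
    # END read in chunks
    return ''.join(chunks)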