Example #1
def perform_update(cx, suite, prefix, fetch):
    # Fetch the actual data.
    metadata = load_metadata(prefix)
    earliest_run_id = metadata['earliest_run_id']

    sys.stdout.write('Querying ' + prefix + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(earliest_run_id)
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' new rows in ' + diff)

    # Break everything into months.
    year = 0
    month = 0
    current = []
    months = []
    for row in rows:
        stamp = int(row[1]) - timezone_offset()
        t = time.gmtime(stamp)
        if t.tm_year != year or t.tm_mon != month:
            if year and len(current):
                months.append(((year, month), current))
            current = []
            year = t.tm_year
            month = t.tm_mon

        current.append(row)

    if len(current):
        months.append(((year, month), current))

    for when, data in months:
        name = prefix + '-' + str(when[0]) + '-' + str(when[1])

        sys.stdout.write('Updating cache for ' + name + '...')
        sys.stdout.flush()
        with Profiler() as p:
            update_cache(cx, suite, name, when, data)
            diff = p.time()
        print('took ' + diff)

    if len(rows):
        metadata['earliest_run_id'] = rows[-1][0]
        save_metadata(prefix, metadata)

    return new_rows
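
Most examples in this collection use Profiler as a context manager whose time() method returns a human-readable elapsed-time string (it is concatenated directly, as in 'took ' + diff). The class itself is not shown anywhere in the listing; what follows is only a minimal sketch of the interface these call sites assume, not the real implementation:

import time

class Profiler(object):
    # Minimal sketch of the context-manager interface assumed by the
    # examples above; the real class may record far more detail.
    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False  # never suppress exceptions from the timed block

    def time(self):
        # Return elapsed time as a string, since callers concatenate it
        # straight into their log messages.
        return '%.2fs' % (time.time() - self._start)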
Example #2
def run_train_test(ds_name, metric, params, obj):
    path = _DATA_PATH + ds_name
    with Profiler('initial feature selection'):
        x_initial_raw, y_initial, _ = load_data(f'{path}/train.csv',
                                                mode='train',
                                                sample=_SAMPLE)
        x_initial, ini_params = initial_processing(x_initial_raw, mode='train')

        tf = CatTransformer(ini_params['cat_cols'])
        x_initial_tf = tf.fit_transform(x_initial)
        selected_features, feat_list = ols_selection(x_initial_tf, y_initial,
                                                     obj)
        hp_params = hyperopt_lgb(x_initial_tf[feat_list], y_initial, params,
                                 obj)

    print('selected features=', len(selected_features))

    x_train_raw, y_train, _ = load_data(f'{path}/train.csv',
                                        mode='train',
                                        sample=_SAMPLE,
                                        used_cols=selected_features)

    x_test_raw, _, _ = load_data(f'{path}/test.csv', mode='test')
    y_test = load_test_label(f'{path}/test-target.csv')

    x_train, train_params = initial_processing(x_train_raw, mode='train')
    x_test, test_params = initial_processing(x_test_raw, mode='test')

    with Profiler('fit transform cat columns'):
        x_test_rein = x_test.reindex(columns=train_params['used_cols'])
        tf = CatTransformer(train_params['cat_cols'])
        tf.fit(x_train)
        x_train_tf = tf.transform(x_train)
        x_test_tf = tf.transform(x_test_rein)

    with Profiler('run train'):
        model = lgb.train(hp_params, lgb.Dataset(x_train_tf, label=y_train),
                          600)

    with Profiler('predict'):
        y_train_out = model.predict(x_train_tf)
        y_test_out = model.predict(x_test_tf)

    train_err = metric(y_train, y_train_out)
    test_err = metric(y_test, y_test_out)

    return train_err, test_err
Example #3
def test_class_profiler_should_calculate_total_acceleration_correctly_balanced_forces():
    # Testing to see acceleration = 0 when F_gravity == F_buoyancy

    parameters = {
        'body_d': 0.50465,
        'piston_d': 0.0,
        'piston_l': 0.0,
        'density': 1025,
        'depth': 0.0,
        'velocity': 0.0,
        'mass': 50.0
    }

    # f_b - f_g = 0
    # f_g = m * g
    # f_b = V * rho * g  (rho = water density)

    # f_b = m * acc_b
    # m * acc_b - m * g = 0
    # acc_b - g = 0
    f_g = parameters['mass'] * g

    volume = parameters['mass'] / parameters['density']
    area = ((parameters['body_d'] / 2)**2) * np.pi
    length = volume / area

    parameters['body_l'] = length

    p = Profiler(**parameters)

    assert 0 == p.acceleration
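
The setup solves for the body length that makes the displaced water's mass equal the float's mass, i.e. neutral buoyancy. Working the numbers through, assuming the Profiler models the body as a cylinder:

volume = 50.0 / 1025                 # mass / density ≈ 0.048780 m³ displaced
area = ((0.50465 / 2) ** 2) * np.pi  # cross-section ≈ 0.200018 m²
length = volume / area               # ≈ 0.243881 m, the body_l passed in

With the buoyant force equal to gravity and zero velocity (no drag), the net force, and hence p.acceleration, is zero.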
Example #4
def train():

    # Initialize torch.distributed
    init_distributed()

    print_rank_0('AutoMP: training GPT2...')
    # Use fake train data
    batch_size = args.batch_size
    sequence_length = args.sequence_length
    hidden_size = args.hidden_size
    vocab_size = args.vocab_size
    dropout_prob = args.hidden_dropout

    input_indices = torch.randint(low=0,
                                  high=vocab_size,
                                  size=(batch_size, sequence_length))
    input_indices = input_indices.to(torch.cuda.current_device())
    position_indices = torch.tile(torch.arange(start=0, end=sequence_length),
                                  (batch_size, 1))
    position_indices = position_indices.to(torch.cuda.current_device())
    print_rank_0(f'AutoMP: input_indices shape = {input_indices.size()}')
    print_rank_0(f'AutoMP: position_indices shape = {position_indices.size()}')

    def init_method_normal(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=1.0)

    embedding = Embedding(hidden_size=hidden_size,
                          vocab_size=vocab_size,
                          max_sequence_length=sequence_length,
                          embedding_dropout_prob=dropout_prob,
                          init_method=init_method_normal)

    optimizer = torch.optim.SGD(embedding.parameters(), lr=0.01)

    profiler = Profiler(os.path.join('benchmark', args.exp_name))

    num_epochs = 5
    tot_time = 0
    nproc = torch.distributed.get_world_size()

    for epoch in range(num_epochs):
        overall_name = f'emb_np-{nproc}_vs-{vocab_size}'
        profiler.start(overall_name)

        # Forward pass
        profiler.start(f'emb_forward_np-{nproc}_vs-{vocab_size}')
        embedding_output = embedding.forward(input_indices, position_indices)
        train_loss = torch.mean(embedding_output)
        torch.cuda.synchronize()
        profiler.stop(f'emb_forward_np-{nproc}_vs-{vocab_size}')

        # Backward pass
        profiler.start(f'emb_backward_np-{nproc}_vs-{vocab_size}')
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
        torch.cuda.synchronize()
        profiler.stop(f'emb_backward_np-{nproc}_vs-{vocab_size}')

        profiler.stop(overall_name)
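
Here Profiler exposes named start()/stop() sections rather than a context manager, with results written under benchmark/<exp_name>. That API is not shown; a plausible minimal sketch, with the output directory and aggregation behavior as assumptions:

import time

class Profiler:
    # Hypothetical start()/stop() timer keyed by section name.
    def __init__(self, out_dir=None):
        self.out_dir = out_dir  # where a real implementation might dump results
        self._starts = {}
        self.totals = {}

    def start(self, name):
        self._starts[name] = time.perf_counter()

    def stop(self, name):
        elapsed = time.perf_counter() - self._starts.pop(name)
        self.totals[name] = self.totals.get(name, 0.0) + elapsed
        return elapsed

Note the torch.cuda.synchronize() calls before each stop(): CUDA kernels launch asynchronously, so without synchronizing, the timer would measure only kernel-launch overhead rather than actual compute time.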
Example #5
def test_class_profiler_should_initialize_with_given_parameters():
    parameters = {
        'body_d': 0.4,
        'body_l': 1.0,
        'piston_d': 0.1,
        'piston_l': 0.3,
        'density': 1023.2,
        'depth': 0.0,
        'velocity': 0.0,
        'mass': 12.2
    }

    p = Profiler(**parameters)

    expected_body_volume = 0.12566
    expected_piston_volume = 0.002356

    assert np.isclose(expected_body_volume,
                      p.body.volume,
                      rtol=1e-3,
                      atol=1e-4)
    assert np.isclose(expected_piston_volume,
                      p.piston.volume,
                      rtol=1e-3,
                      atol=1e-4)
    assert parameters['density'] == p.water.density
    assert parameters['depth'] == p.water.depth
    assert parameters['velocity'] == p.velocity
    assert parameters['mass'] == p._mass
Example #6
    def __init__(self, players):
        pf = Profiler()
        pf.printer.indent()
        pf.printer.silent = not PERFORMANCE_LOGGING

        self.players = players

        # Set up the deck
        deck = INITIAL_DECK[:]
        pf.measure("Set up deck")
        shuffle(deck)

        pf.measure("Shuffle deck")

        self.deck = deck
        """ Current draw deck. Excludes Snitches. """

        self.discard_deck = []
        """ Deck discarded (played) cards. Excludes Snitches. """

        pf.measure("Other set up")
        # Give each player the initial number of Snitches and deal the rest.
        for player in self.players:
            number_of_character_cards_to_deal = INITIAL_CARDS_PER_PLAYER - INITIAL_SNITCHES_PER_PLAYER
            character_hand = self.deck[:number_of_character_cards_to_deal]
            del self.deck[:number_of_character_cards_to_deal]
            player.set_up(
                hand=INITIAL_SNITCH_HAND + character_hand,
                coins=INITIAL_COINS_PER_PLAYER)
            player.prepare()
        
        pf.measure("Give players cards")
Example #7
def renew_cache(cx, machine, suite, prefix, when, fetch):
    delete_cache(prefix + '-' + str(when[0]) + '-' + str(when[1]))

    # Delete corresponding condensed graph
    before, after = prefix.split("raw", 1)
    delete_cache(before + "condensed" + after + '-' + str(when[0]) + '-' + str(when[1]))

    dt = datetime.datetime(year=when[0], month=when[1], day=1)
    start_stamp = int(time.mktime(dt.timetuple()))

    next_month = when[1] + 1
    next_year = when[0]
    if next_month == 13:
        next_month = 1
        next_year += 1
    dt = datetime.datetime(year=next_year, month=next_month, day=1)
    stop_stamp = int(time.mktime(dt.timetuple())) - 1

    name = prefix + '-' + str(when[0]) + '-' + str(when[1])

    # Querying all information from this month.
    sys.stdout.write('Fetching monthly info ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(machine, approx_stamp=(start_stamp, stop_stamp))
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' rows in ' + diff)

    update_cache(cx, suite, name, when, rows) 
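
The month-boundary arithmetic (the month's first second through the last second before the next month) can also be computed with calendar.monthrange; an equivalent sketch, shown only to clarify what the two stamps cover (the forms agree barring a DST jump exactly at a month boundary):

import calendar
import datetime
import time

def month_bounds(year, month):
    # Same local-time bounds as the code above: the month's first second
    # and its last second (one second before the next month starts).
    start = int(time.mktime(datetime.datetime(year, month, 1).timetuple()))
    last_day = calendar.monthrange(year, month)[1]
    end = datetime.datetime(year, month, last_day, 23, 59, 59)
    return start, int(time.mktime(end.timetuple()))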
Example #8
def main(rank, world_size):
    global tb_logger, profiler, dataset, log_dir, trainloader, testloader

    if not args.no_tensorboard:
        log_dir = os.path.join('log', args.expid, datetime.now().isoformat())

        tb_logger = SummaryWriter(log_dir=log_dir)

    logger = Logger()

    if not args.no_profiler:
        profiler = Profiler(logger, tb_logger, freq=args.profile_freq)
        profiler.log(log_network=args.profile_networkio)

    # Integer division: DataLoader requires an integer batch size.
    sync_batch = args.batch_size // world_size

    # Guard the TensorBoard write; tb_logger only exists when enabled.
    if not args.no_tensorboard:
        tb_logger.add_text('params/batch_size', str(sync_batch), 1)

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainset.train_data = np.split(trainset.train_data, args.split_by)[args.split_to_use]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=sync_batch, shuffle=False, num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

    for epoch in range(args.epochs):
        train(epoch)
        test(epoch)
Example #9
File: update.py Project: isabella232/awfy
def renew_cache(cx, machine, suite, prefix, when, last_stamp, fetch):
    delete_cache(prefix + '-' + str(when[0]) + '-' + str(when[1]))

    # Delete corresponding condensed graph
    if prefix[0:3] == "raw":
        delete_cache("condensed" + prefix[3:] + '-' + str(when[0]) + '-' +
                     str(when[1]))
    else:
        delete_cache("bk-condensed" + prefix[6:] + '-' + str(when[0]) + '-' +
                     str(when[1]))

    dt = datetime.datetime(year=when[0], month=when[1], day=1)
    start_stamp = int(time.mktime(dt.timetuple()))

    next_month = when[1] + 1
    next_year = when[0]
    if next_month == 13:
        next_month = 1
        next_year += 1
    dt = datetime.datetime(year=next_year, month=next_month, day=1)
    stop_stamp = int(time.mktime(dt.timetuple())) - 1
    if last_stamp < stop_stamp:
        stop_stamp = last_stamp

    # Querying all information from this month.
    sys.stdout.write('Querying ' + prefix + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(machine, test_stamp=(start_stamp, stop_stamp))
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' rows in ' + diff)

    name = prefix + '-' + str(when[0]) + '-' + str(when[1])
    update_cache(cx, suite, name, when, rows)
Example #10
File: tests.py Project: jack2150/rivers0.2
    def test_create_opinion_button(self):
        """
        Test create opinion button
        """
        print('run create_opinion_button...')
        opinion_button = self.profiler.create_opinion_button()

        print('opinion_button:')
        print(opinion_button)

        self.assertEqual(type(opinion_button), dict)
        self.assertTrue(opinion_button['saved'])
        self.assertEqual(opinion_button['object'].id, self.position_opinion.id)

        print('\n' + '.' * 60 + '\n')
        print('test false...')
        print('\n' + '.' * 60 + '\n')

        print('run create_opinion_button...')
        self.profiler = Profiler(self.position_set, date='2015-04-09')
        opinion_button = self.profiler.create_opinion_button()

        self.assertEqual(type(opinion_button), dict)
        self.assertFalse(opinion_button['saved'])

        print('opinion_button:')
        print(opinion_button)
Example #11
File: tests.py Project: jack2150/rivers0.2
    def setUp(self):
        TestSetUpDB.setUp(self)

        # set date
        self.date = '2015-04-28'

        # create underlying
        self.underlying = Underlying(symbol='AAPL', company='APPLE INC')

        # create position set
        self.position_set = PositionSet()
        self.position_set.underlying = self.underlying
        self.position_set.name = 'EQUITY'
        self.position_set.spread = 'LONG_STOCK'
        self.position_set.start_date = datetime.strptime(
            '2015-04-13', '%Y-%m-%d').date()
        self.position_set.stop_date = datetime.strptime(
            '2015-04-28', '%Y-%m-%d').date()
        self.position_set.save()

        # create position opinion
        self.position_opinion = self.create_opinion(
            date=Profiler.move_bday(self.date, 1),
            position_set=self.position_set,
            direction='BULL')

        # create profiler now
        self.profiler = Profiler(position_set=self.position_set,
                                 date=self.date)
Example #12
def test_class_profiler_should_calculate_total_acceleration_with_drag_included():
    parameters = {
        'body_d': 0.50465,
        'body_l': 1.0,
        'piston_d': 0.0,
        'piston_l': 0.0,
        'density': 1025,
        'depth': 0.0,
        'velocity': 1.0,
        'mass': 50.0
    }

    expected_f_drag = 84.05

    f_buoy = 2010.55
    f_grav = 490.33

    f_total = f_buoy - (expected_f_drag + f_grav)

    p = Profiler(**parameters)
    print(p.acceleration)
    assert np.isclose(expected_f_drag,
                      p.drag.drag,
                      rtol=1e-4,
                      atol=1e-6,
                      equal_nan=False)
Example #13
    def __init__(self):
        self.conf_space = ConfSpace()
        self.profiler = Profiler()
        self.predictor = PerfPredict()
        self.exploit_times = 0
        self.exploit_max = 5
        # how many configurations to evaluate with the performance predictor
        # before actually running the benchmark
        self.predict_max = 1000
Example #14
File: tests.py Project: jack2150/rivers0.2
    def setUp(self):
        TestUnitSetUp.setUp(self)

        self.position_set = PositionSet.objects.get(id=65)
        self.date = self.position_set.filledorder_set.order_by('trade_summary__date') \
            .last().trade_summary.date.strftime('%Y-%m-%d')

        self.profiler = Profiler(position_set=self.position_set,
                                 date=self.date)
Example #15
    def __init__(self, socket):
        log.error(DEBUG, msg='worker __init__')

        self._socket = socket
        self._epoll = select.epoll()
        self._profiler = Profiler()
        self._activity_iterators = {}
        self._child_pids = []
        self._client_conns = []
        self._socket.setblocking(False)
Example #16
def main(argv):
    sys.stdout.write('Computing master properties... ')
    sys.stdout.flush()
    with Profiler() as p:
        cx = data.Context()
        diff = p.time()
    print('took ' + diff)

    update_all(cx)
    condenser.condense_all(cx)
    export_master(cx)
Example #17
def condense(cx, suite, prefix, name):
    with Profiler() as p:
        sys.stdout.write('Importing all datapoints for ' + name + '... ')
        sys.stdout.flush()

        files = find_all_months(cx, prefix, name)
        diff = p.time()

    print('took ' + diff)

    if not len(files):
        return False

    change = False

    for when, raw_file in files:
        condensed_name = (prefix + 'condensed-' + name + '-' +
                          str(when[0]) + '-' + str(when[1]))
        condensed_file = condensed_name + '.json'

        # Only update the graph when condensed file is older.
        if os.path.exists(os.path.join(
                awfy.path, condensed_file)) and file_is_newer(
                    os.path.join(awfy.path, condensed_file),
                    os.path.join(awfy.path, raw_file)):
            continue

        # There was a datapoint added to one of the condensed files.
        change = True

        with Profiler() as p:
            sys.stdout.write('Condensing ' + condensed_name + '... ')
            sys.stdout.flush()

            graph = retrieve_graph(cx, raw_file)

            condense_month(cx, suite, graph, prefix, condensed_name)
            diff = p.time()
        print(' took ' + diff)

    return change
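
file_is_newer is not shown in the listing; given how it is used above (skip re-condensing when the condensed file is already newer than the raw file), a plausible, purely hypothetical mtime-based implementation would be:

import os

def file_is_newer(a, b):
    # Hypothetical helper: True if file a was modified no earlier than file b.
    return os.path.getmtime(a) >= os.path.getmtime(b)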
Example #18
def main():
    model = Network()
    checkpoint = torch.load('/mnt/nvme/adas/code/custom_layer/checkpoint_8.pt')
    model.load_state_dict(checkpoint['model_state_dict'])

    P = Profiler(model, (4, 3, 512, 256))
    total_ops, total_params, total_mult, total_add = P.profile()

    print("=====================================================")
    print("Mult Ops: %f GOps" % (total_mult / 1e9))
    print("Add Ops: %f GOps" % (total_add / 1e9))
    print("Total Ops: %f GOps" % (total_ops / 1e9))
    print("Total Parameters: %f * 10^6 " % (total_params / 1e6))
Example #19
    def profiler(self, job_name):
        db_meta = self._load_db_metadata()
        if isinstance(job_name, str):  # str replaces Python 2's basestring
            job_id = None
            for job in db_meta.jobs:
                if job.name == job_name:
                    job_id = job.id
                    break
            if job_id is None:
                raise ScannerException('Job name {} does not exist'.format(job_name))
        else:
            job_id = job_name

        return Profiler(self, job_id)
Example #20
def fetch_test_scores(machine_id,
                      suite_id,
                      name,
                      finish_stamp=(0, "UNIX_TIMESTAMP()"),
                      approx_stamp=(0, "UNIX_TIMESTAMP()")):
    c = awfy.db.cursor()
    query = "SELECT id FROM awfy_suite_test \
             WHERE name = %s"

    c.execute(query, [name])
    suite_ids = ['0']
    for row in c.fetchall():
        suite_ids.append(str(row[0]))

    with Profiler() as p:
        query = "SELECT id                                    \
                 FROM awfy_run                                \
                 WHERE status > 0                             \
                 AND machine = %s                             \
                 AND approx_stamp >= " + str(approx_stamp[0]) + " \
                 AND approx_stamp <= " + str(approx_stamp[1]) + " \
                 AND finish_stamp >= " + str(finish_stamp[0]) + " \
                 AND finish_stamp <= " + str(finish_stamp[1]) + " \
                 "

        c.execute(query, [machine_id])
        run_ids = ['0']
        for row in c.fetchall():
            run_ids.append(str(row[0]))
        diff = p.time()
    print('found ' + str(len(run_ids)) + ' rows in ' + diff)

    query = "SELECT r.id, r.approx_stamp, bu.cset, s.score, bu.mode_id, v.id, s.id \
             FROM awfy_suite_version v                                             \
             JOIN awfy_suite_test t ON v.id = t.suite_version_id                   \
             JOIN awfy_breakdown s ON s.suite_test_id = t.id                       \
             JOIN awfy_score s1 ON s.score_id = s1.id                              \
             JOIN awfy_build bu ON s1.build_id = bu.id                             \
             JOIN awfy_run r ON r.id = bu.run_id                                   \
             WHERE v.suite_id = %s                                                 \
             AND t.id in (" + (
        ",".join(suite_ids)) + ")                               \
             AND r.id in (" + (
            ",".join(run_ids)) + ")                                 \
             ORDER BY r.sort_order ASC                                             \
             "

    c.execute(query, [suite_id])
    return c.fetchall()
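
The run query splices the timestamp bounds directly into the SQL string because the defaults are the SQL expression UNIX_TIMESTAMP(), which cannot be passed as a bound parameter. When the bounds are plain integers, a parameterized form is safer; a sketch of that alternative:

query = ("SELECT id FROM awfy_run "
         "WHERE status > 0 AND machine = %s "
         "AND approx_stamp BETWEEN %s AND %s "
         "AND finish_stamp BETWEEN %s AND %s")
c.execute(query, [machine_id, approx_stamp[0], approx_stamp[1],
                  finish_stamp[0], finish_stamp[1]])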
Example #21
    def train(self):
        rnn_type = LSTM

        rnn_dropout = 0.0
        rnn_units = 128
        rnn_timesteps = 128
        rnn_features = 32

        output_size = 1

        batch_size = 512
        epochs = 10000000

        input_shape = (rnn_timesteps, rnn_features)

        if os.path.isfile(self.model_path):
            profiler = Profiler()
            model = load_model(self.model_path,
                               custom_objects={
                                   'root_mean_squared_error':
                                   root_mean_squared_error
                               })
            profiler.stop(f'Loaded model from "{self.model_path}".')
        else:
            model = Sequential()
            model.add(
                rnn_type(rnn_units,
                         dropout=rnn_dropout,
                         return_sequences=False,
                         input_shape=input_shape))
            model.add(Dense(output_size))
            model.add(Activation('tanh'))
            optimizer = Adam(lr=0.01)
            model.compile(optimizer=optimizer, loss=root_mean_squared_error)

        training_generator = SlidingWindowGenerator(self.x_training_wav,
                                                    self.y_training_wav,
                                                    input_shape, output_size,
                                                    batch_size)
        validation_generator = SlidingWindowGenerator(self.x_validation_wav,
                                                      self.y_validation_wav,
                                                      input_shape, output_size,
                                                      batch_size)
        save_callback = SaveCallback(self.model_path)
        history = model.fit_generator(generator=training_generator,
                                      epochs=epochs,
                                      verbose=1,
                                      validation_data=validation_generator,
                                      callbacks=[save_callback])
Example #22
def main(size=50000, sort=quicksort):

    lyst = []
    p = Profiler()
    for count in range(size):
        lyst.append(random.randint(1, size + 1))
    print(lyst)
    p.test(sort,
           lyst,
           size,
           unique=True,
           comp=True,
           exch=True,
           trace=False,
           mod=False)
Example #23
def condense(cx, suite, prefix, name):
    sys.stdout.write('Importing all datapoints for ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        graphs = find_all_months(cx, prefix, name)
        diff = p.time()
    print('took ' + diff)

    if not len(graphs):
        return

    for when, graph in graphs:
        new_name = (prefix + 'condensed-' + name + '-' +
                    str(when[0]) + '-' + str(when[1]))

        # Don't condense if it already exists...
        if not should_export(new_name + '.json', when):
            continue

        sys.stdout.write('Condensing ' + new_name + '... ')
        sys.stdout.flush()
        with Profiler() as p:
            condense_month(cx, suite, graph, prefix, new_name)
            diff = p.time()
        print(' took ' + diff)

    # Combine everything.
    sys.stdout.write('Aggregating ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        combined = combine([graph for when, graph in graphs])
        summary = aggregate(combined)
        diff = p.time()
    print('took ' + diff)

    return summary
Example #24
    def __init__(self, init_state: tuple):
        self.init_state = init_state

        # saves states tuples, uses a double ended queue;
        # can behave both as a FIFO queue and as a LIFO stack
        self.fringe = deque()

        # saves states tuples; uses a set for speed when checking if a state
        # has been explored
        self.explored = set()

        # saves nodes details for every node state:
        # nodeState -> (nodeParent, action, cost, depth)
        self.node_db = dict()

        self.profiler = Profiler()
        self.set_goal()
Example #25
    def __init__(self):
        self.conf_space = ConfSpace()
        self.semantics = self.conf_space.hadoop_semantics
        self.profiler = Profiler()
        # self.predictor = PerfPredict()
        self.profile_num = 0  # initial profiling index
        # self.performance_track = []
        # self.K_iters_for_convergence_test = 5  # check the last 5 iterations
        # self.perf_improvement_threshold = 0.02  # 2% improvement over the last cycle
        self.best_conf = None
        self.global_improvement = 0.5
        self.max_iter = 6000
        self.initial_perf = sys.maxsize
        self.type_checker = HadoopConfChecker()
        self.type_checker.set_all_param_value(self.conf_space.param_values)
        self.invalid_confs = []
        self.curr_genconf_folder = cfg.gen_confs + os.sep + 'conf'
Example #26
def main():
    old_stdout = sys.stdout
    with open('data_dictionary/profiles/backup.nquads', "w+") as backup:
        # backup database
        sys.stdout = backup
        Backup()
    with open('data_dictionary/jupiter_ontology.md', "w+") as ontology:
        # serialize a new jupiter ontology file
        sys.stdout = ontology
        owlDocument().generate()
    sys.stdout = old_stdout
    for ptype in [
            "collection", "community", "generic", "thesis", "oai_pmh",
            "oai_etdms"
    ]:
        # serialize a profile for each object type
        Profiler(ptype)
    excelGen()
Example #27
def findBestCase():
    '''
    Compile a table of the time used for sorting, across different
    list sizes and threshold values.
    '''
    p = Profiler()
    masterList = []  # list to contain the lists of different sizes
    resultNoMod = []

    print(" " * 2, end="")
    # Build the master list from random numbers, one list per size.
    for exponent in range(1, 5):
        size = 5 * 10**exponent

        lyst = []
        for count in range(size):
            lyst.append(random.randint(1, size + 1))
        masterList.append(lyst)
        print(f"{size:<13}", end="")  # print the table header (list size)
    print()

    # Results for unmodified quicksort.
    for i in range(0, len(masterList)):
        tempLyst = masterList[i][:]  # copy so we don't change the master list
        resultNoMod.append(repr(p.test(quicksort, tempLyst, mod=False)))
    print(resultNoMod, "Not Modified")

    # Results for modified quicksort with different threshold values.
    for multiplier in range(1, 5):
        resultMod = []
        thresholdValue = multiplier * 10
        for i in range(0, len(masterList)):
            tempLyst = masterList[i][:]  # copy so we don't change the master list
            resultMod.append(
                repr(
                    p.test(quicksort,
                           tempLyst,
                           threshold=thresholdValue,
                           mod=True)))
        print(resultMod, "Threshold =", thresholdValue)
Example #28
def test_class_profiler_should_calculate_total_volume_correctly():
    parameters = {
        'body_d': 0.4,
        'body_l': 1.0,
        'piston_d': 0.1,
        'piston_l': 0.3,
        'density': 1023.2,
        'depth': 0.0,
        'velocity': 0.0,
        'mass': 12.2
    }

    p = Profiler(**parameters)

    expected_body_volume = 0.12566
    expected_piston_volume = 0.002356
    expected_total_volume = expected_body_volume + expected_piston_volume

    assert np.isclose(expected_total_volume, p.volume, rtol=1e-4, atol=1e-6)
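
The expected values follow from the cylinder volume formula V = pi * (d/2)**2 * l, assuming Profiler models both body and piston as cylinders:

body_volume = np.pi * (0.4 / 2) ** 2 * 1.0    # ≈ 0.125664 m³
piston_volume = np.pi * (0.1 / 2) ** 2 * 0.3  # ≈ 0.002356 m³
total = body_volume + piston_volume           # ≈ 0.128020 m³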
Example #29
def testPlanner(planner, numTrials, maxTime, filename):
    print("Testing planner for %d trials, %f seconds" % (numTrials, maxTime))
    print("Saving to", filename)
    f = open(filename, 'w')
    f.write("trial,plan iters,plan time,best cost\n")
    for trial in range(numTrials):
        print()
        print("Trial", trial + 1)
        planner.reset()
        curCost = float('inf')
        t0 = time.time()
        numupdates = 0
        iters = 0
        hadException = False
        while time.time() - t0 < maxTime:
            try:
                planner.planMore(10)
            except Exception as e:
                if hadException:
                    print("Warning, planner raised two exceptions in a row. Quitting")
                    break
                else:
                    print("Warning, planner raised an exception... soldiering on")
                    print(e)
                    hadException = True
                    continue
            iters += 10
            if planner.bestPathCost is not None and planner.bestPathCost != curCost:
                numupdates += 1
                curCost = planner.bestPathCost
                t1 = time.time()
                f.write(str(trial) + "," + str(iters) + "," + str(t1 - t0) + "," + str(curCost) + '\n')
        if hasattr(planner, 'stats'):
            print()
            temp = Profiler()
            temp.items["Stats:"] = planner.stats
            temp.pretty_print()
        print()
        print("Final cost:", curCost)
        print()

        f.write(str(trial) + "," + str(iters) + "," + str(maxTime) + "," + str(curCost) + '\n')
    f.close()
Example #30
def modifiedQuicksort(lyst, threshold):
    p = Profiler()
    # "At or below the threshold" per the instructions, since the testing
    # phase uses data sizes of 50 to 5000; the variable keeps the threshold
    # editable from the testing side.
    if len(lyst) <= threshold:
        p.test(insertionSort,
               lyst=lyst,
               size=len(lyst),
               comp=False,
               exch=False,
               trace=False)
    else:
        p.test(qs.quicksort,
               lyst=lyst,
               size=len(lyst),
               comp=False,
               exch=False,
               trace=False)