Example #1
    def setUp(self):
        TestSetUpDB.setUp(self)

        # set date
        self.date = "2015-04-28"

        # create underlying
        self.underlying = Underlying(symbol="AAPL", company="APPLE INC")

        # create position set
        self.position_set = PositionSet()
        self.position_set.underlying = self.underlying
        self.position_set.name = "EQUITY"
        self.position_set.spread = "LONG_STOCK"
        self.position_set.start_date = datetime.strptime("2015-04-13", "%Y-%m-%d").date()
        self.position_set.stop_date = datetime.strptime("2015-04-28", "%Y-%m-%d").date()
        self.position_set.save()

        # create position opinion
        self.position_opinion = self.create_opinion(
            date=Profiler.move_bday(self.date, 1), position_set=self.position_set, direction="BULL"
        )

        # create profiler now
        self.profiler = Profiler(position_set=self.position_set, date=self.date)
Example #2
    def setUp(self):
        TestSetUpDB.setUp(self)

        # set date
        self.date = '2015-04-28'

        # create underlying
        self.underlying = Underlying(symbol='AAPL', company='APPLE INC')

        # create position set
        self.position_set = PositionSet()
        self.position_set.underlying = self.underlying
        self.position_set.name = 'EQUITY'
        self.position_set.spread = 'LONG_STOCK'
        self.position_set.start_date = datetime.strptime(
            '2015-04-13', '%Y-%m-%d').date()
        self.position_set.stop_date = datetime.strptime(
            '2015-04-28', '%Y-%m-%d').date()
        self.position_set.save()

        # create position opinion
        self.position_opinion = self.create_opinion(
            date=Profiler.move_bday(self.date, 1),
            position_set=self.position_set,
            direction='BULL')

        # create profiler now
        self.profiler = Profiler(position_set=self.position_set,
                                 date=self.date)
Example #3
    def __init__(self, opts):
        Profiler.__init__(self, opts)
        try:
            self.intervalParts = re.search("([0-9]*)([^0-9]*)",
                                           self.intervalStr)
            if self.intervalParts.group(1) == "":
                self.intervalCount = 1
            else:
                self.intervalCount = int(self.intervalParts.group(1))
            self.intervalUnit = self.intervalParts.group(2)
            self.interval = self.intervalCount * self.intervalFormats[
                self.intervalUnit]["interval"]
            self.format = self.intervalFormats[self.intervalUnit]["format"]
            self.intervalLabel = str(
                self.intervalCount) + " " + self.intervalFormats[
                    self.intervalUnit]["name"]
            if self.intervalCount > 1:
                self.intervalLabel += "s"

        except ValueError as e:
            sys.stderr.write("uhoh: %s\n" % e)

        # gather in a dict with count if aggregating, otherwise in a list
        # (assuming the aggregate flag was stored on self by the base class)
        if self.aggregate:
            self.items = {}
        else:
            self.items = []
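
Note: the regex above splits an interval string like "10m" into an optional
count and a unit suffix, defaulting the count to 1. A standalone sketch of
that parsing (the intervalFormats table here is a hypothetical stand-in for
the class's own):

import re

intervalFormats = {  # hypothetical illustration table
    's': {'interval': 1, 'format': '%H:%M:%S', 'name': 'second'},
    'm': {'interval': 60, 'format': '%H:%M', 'name': 'minute'},
    'h': {'interval': 3600, 'format': '%H:00', 'name': 'hour'},
}

for intervalStr in ('10m', 'h', '5s'):
    parts = re.search("([0-9]*)([^0-9]*)", intervalStr)
    count = int(parts.group(1)) if parts.group(1) else 1
    unit = parts.group(2)
    print(count * intervalFormats[unit]['interval'])  # 600, 3600, 5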
Example #4
    def test_create_opinion_button(self):
        """
        Test create opinion button
        """
        print 'run create_opinion_button...'
        opinion_button = self.profiler.create_opinion_button()

        print 'opinion_button:'
        print opinion_button

        self.assertEqual(type(opinion_button), dict)
        self.assertTrue(opinion_button['saved'])
        self.assertEqual(opinion_button['object'].id, self.position_opinion.id)

        print '\n' + '.' * 60 + '\n'
        print 'test false...'
        print '\n' + '.' * 60 + '\n'

        print 'run create_opinion_button...'
        self.profiler = Profiler(self.position_set, date='2015-04-09')
        opinion_button = self.profiler.create_opinion_button()

        self.assertEqual(type(opinion_button), dict)
        self.assertFalse(opinion_button['saved'])

        print 'opinion_button:'
        print opinion_button
Example #5
def initial_processing(df, mode):
    if df.memory_usage().sum() > BIG_DATASET_SIZE:
        is_big = True
    else:
        is_big = False

    with Profiler(' - features from datetime'):
        df, date_cols, orig_date_cols = transform_datetime_features(df)

    cat_cols = get_cat_freqs(df)

    numeric_cols = [c for c in df.columns if c.startswith('number')]

    with Profiler(' - reindex new cols'):
        used_cols = date_cols + list(cat_cols) + numeric_cols
        df = df.reindex(columns=used_cols)

    # if is_big:
    #     with Profiler(' - convert to float32'):
    #         df[numeric_cols] = df[numeric_cols].astype(np.float32)

    print(f' - Cat: {len(cat_cols)}, num: {len(numeric_cols)}, date: {len(date_cols)}, orig_dt: {len(orig_date_cols)}')
    print(f' - Used: {len(used_cols)}, memory: {get_mem(df)}')
    params = dict(
        cat_cols=cat_cols,
        numeric_cols=numeric_cols,
        date_cols=date_cols,
        used_cols=used_cols
    )
    return df, params
Example #6
def splitWindow(window_size, number_of_splits, reconfig_delta, flows):
	Profiler.start('splitWindow')

	window_split_floor = int(math.floor(float(window_size - (reconfig_delta * (number_of_splits - 1))) / float(number_of_splits)))

	windows = [window_split_floor] * number_of_splits
	extra = window_size - (sum(windows) + reconfig_delta * (number_of_splits - 1))

	if extra >= len(windows):
		raise Exception('Unexpected values when splitting')

	for i in range(extra):
		windows[i] += 1

	if sum(windows) + reconfig_delta * (number_of_splits - 1) != window_size:
		raise Exception('Unexpected values when splitting')

	splits = [(w, {}) for w in windows]
	for k, flow in flows.iteritems():
		assigned_slots = sorted(random.sample(range(number_of_splits), len(flow.route) - 1))

		for i, split in enumerate(assigned_slots):
			this_src = flow.route[i]
			this_dst = flow.route[i + 1]
			this_route = [this_src, this_dst]
			split_flow = input_utils.Flow(flow.id, this_src, this_dst, flow.size, this_route, [this_route])

			splits[split][1][k] = (split_flow, flow.invweight())

	Profiler.end('splitWindow')

	return splits
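
Note: the arithmetic in splitWindow distributes window_size across
number_of_splits slots with reconfig_delta reserved between consecutive
slots, then hands the remainder out one unit at a time. The same arithmetic
in isolation (no flows involved):

import math

def split_sizes(window_size, number_of_splits, reconfig_delta):
    usable = window_size - reconfig_delta * (number_of_splits - 1)
    floor = int(math.floor(float(usable) / number_of_splits))
    windows = [floor] * number_of_splits
    extra = usable - sum(windows)
    for i in range(extra):
        windows[i] += 1
    return windows

print(split_sizes(20, 3, 2))  # [6, 5, 5] -> 6 + 5 + 5 + 2 * 2 == 20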
Example #7
    def __init__(self, players):
        pf = Profiler()
        pf.printer.indent()
        pf.printer.silent = not PERFORMANCE_LOGGING

        self.players = players

        # Set up the deck
        deck = INITIAL_DECK[:]
        pf.measure("Set up deck")
        shuffle(deck)

        pf.measure("Shuffle deck")

        self.deck = deck
        """ Current draw deck. Excludes Snitches. """

        self.discard_deck = []
        """ Deck discarded (played) cards. Excludes Snitches. """

        pf.measure("Other set up")
        # Give each player the initial number of Snitches and deal the rest.
        for player in self.players:
            number_of_character_cards_to_deal = INITIAL_CARDS_PER_PLAYER - INITIAL_SNITCHES_PER_PLAYER
            character_hand = self.deck[:number_of_character_cards_to_deal]
            del self.deck[:number_of_character_cards_to_deal]
            player.set_up(
                hand=INITIAL_SNITCH_HAND + character_hand,
                coins=INITIAL_COINS_PER_PLAYER)
            player.prepare()
        
        pf.measure("Give players cards")
Example #8
def condense(cx, suite, prefix, name):
    sys.stdout.write('Importing all datapoints for ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        graphs = find_all_months(cx, prefix, name)
        diff = p.time()
    print('took ' + diff)

    if not len(graphs):
        return

    for when, graph in graphs:
        new_name = prefix + 'condensed-' + name + '-' + str(
            when[0]) + '-' + str(when[1])
        sys.stdout.write('Condensing ' + new_name + '... ')
        sys.stdout.flush()
        with Profiler() as p:
            condense_month(cx, suite, graph, prefix, new_name)
            diff = p.time()
        print(' took ' + diff)

    # Combine everything.
    sys.stdout.write('Aggregating ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        combined = combine([graph for when, graph in graphs])
        summary = aggregate(combined)
        diff = p.time()
    print('took ' + diff)

    return summary
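
Note: this example (and several below) uses Profiler as a context manager
whose time() returns a formatted duration string. A minimal sketch of an
object satisfying that assumed interface, not the project's actual class:

import time

class TimerProfiler(object):
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc, tb):
        return False  # do not swallow exceptions

    def time(self):
        # formatted elapsed time, matching the "took <diff>" prints above
        return '%.2fs' % (time.time() - self.start)

with TimerProfiler() as p:
    sum(range(1000000))
    diff = p.time()
print('took ' + diff)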
Example #9
def main(rank, world_size):
    global tb_logger, profiler, dataset, log_dir, trainloader, testloader

    if not args.no_tensorboard:
        log_dir = os.path.join('log', 
            args.expid,
            datetime.now().isoformat())

        tb_logger = SummaryWriter(log_dir=log_dir)

    logger = Logger()

    if not args.no_profiler:
        profiler = Profiler(logger, tb_logger, freq=args.profile_freq)
        profiler.log(log_network=args.profile_networkio)

    if not args.no_tensorboard:
        tb_logger.add_text('params/batch_size', str(args.batch_size / world_size), 1)

    sync_batch = args.batch_size // world_size  # integer division: DataLoader needs an int batch size

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainset.train_data = np.split(trainset.train_data, args.split_by)[args.split_to_use]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=sync_batch, shuffle=False, num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

    for epoch in range(args.epochs):
        train(epoch)
        test(epoch)
Example #10
 def __init__(self):
     self.conf_space = ConfSpace()
     self.profiler = Profiler()
     self.predictor = PerfPredict()
     self.exploit_times = 0
     self.exploit_max = 5
     # how many configurations to evaluate with the performance predictor before actually running the benchmark
     self.predict_max = 1000
Example #11
    def setUp(self):
        TestUnitSetUp.setUp(self)

        self.position_set = PositionSet.objects.get(id=65)
        self.date = self.position_set.filledorder_set.order_by('trade_summary__date') \
            .last().trade_summary.date.strftime('%Y-%m-%d')

        self.profiler = Profiler(position_set=self.position_set,
                                 date=self.date)
Example #12
    def __init__(self, socket):
        log.error(DEBUG, msg='worker __init__')

        self._socket = socket
        self._epoll = select.epoll()
        self._profiler = Profiler()
        self._activity_iterators = {}
        self._child_pids = []
        self._client_conns = []
        self._socket.setblocking(False)
Example #13
def reduceToOneHop(flows):
	Profiler.start('reduceToOneHop')
	single_hop_flows = {}

	for k, flow in flows.iteritems():
		single_hop_flows[k] = input_utils.Flow(flow.id, flow.src, flow.dst, flow.size, [flow.src, flow.dst], flow.all_routes)

	Profiler.end('reduceToOneHop')

	return single_hop_flows
Example #14
def test_creates_checkpoints_for_profile():
  profiler = Profiler().start('id1').start('id2')
  profiler.profiles['id1'].checkpoint = Mock()
  profiler.profiles['id2'].checkpoint = Mock()

  profiler.checkpoint('id1')
  profiler.checkpoint('id2')

  profiler.profiles['id1'].checkpoint.assert_called_once_with()
  profiler.profiles['id2'].checkpoint.assert_called_once_with()
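
Note: this test assumes a Profiler whose start(id) registers a per-id
profile and returns self (so calls chain), and whose checkpoint(id)
forwards to that profile. A minimal sketch of that assumed interface:

import time

class Profile(object):
    def __init__(self):
        self.checkpoints = []

    def checkpoint(self):
        self.checkpoints.append(time.time())

class ChainingProfiler(object):
    def __init__(self):
        self.profiles = {}

    def start(self, profile_id):
        self.profiles[profile_id] = Profile()
        return self  # returning self lets .start('id1').start('id2') chain

    def checkpoint(self, profile_id):
        self.profiles[profile_id].checkpoint()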
Example #15
 def process(self, tweet):
     Profiler.process(self, tweet)
     # gather a list of the tags in this tweet, lowercased
     savetweet = []
     for tag in tweet['entities']['hashtags']:
         t = tag['text'].lower()
         savetweet.append(t)
         # and increment count for this tag
         self.counts[t] += 1
     # add tag list to savetweets
     self.savetweets.append(savetweet)
Example #16
 def process(self, tweet):
     Profiler.process(self, tweet)
     # gather a list of the tags in this tweet, lowercased
     savetweet = []
     for tag in tweet["entities"]["hashtags"]:
         t = tag["text"].lower()
         savetweet.append(t)
         # and increment count for this tag
         self.counts[t] += 1
     # add tag list to savetweets
     self.savetweets.append(savetweet)
Example #17
def main():
	# check config file:
	if len(sys.argv) <= 1:
		print 'Please provide config file. '
		return
	config_file = sys.argv[1]
	# parse config file:
	config = Configuration(config_file)
	
	# init the profiler and get it working:
	profiler = Profiler(config)
	profiler.execute()	
Example #18
def perform_update(cx, machine, direction, prefix, fetch):
    # Fetch the actual data.
    metadata = load_metadata(prefix)
    last_stamp = metadata['last_stamp']
    current_stamp = int(time.time())

    sys.stdout.write('Querying for new rows ' + prefix + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(machine, finish_stamp=(last_stamp + 1, current_stamp))
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' new rows in ' + diff)
    if new_rows == 0:
        metadata['last_stamp'] = current_stamp
        save_metadata(prefix, metadata)
        return 0

    # Break everything into months.
    year = 0
    month = 0
    current = []
    months = []
    for row in rows:
        stamp = int(row[1])
        t = time.gmtime(stamp)
        if t.tm_year != year or t.tm_mon != month:
            if year and len(current):
                months.append(((year, month), current))
            current = []
            year = t.tm_year
            month = t.tm_mon

        current.append(row)

    if len(current):
        months.append(((year, month), current))

    for when, data in months:
        name = prefix + '-' + str(when[0]) + '-' + str(when[1])

        with Profiler() as p:
            if not update_cache(cx, direction, name, when, data):
                renew_cache(cx, machine, direction, prefix, when, fetch)
            diff = p.time()
        sys.stdout.write('Updating cache for ' + name + '...')
        sys.stdout.flush()
        print('took ' + diff)

    metadata['last_stamp'] = current_stamp
    save_metadata(prefix, metadata)

    return new_rows
Example #19
def teardown(self):
    self.chrome.wait_event("Page.loadEventFired", timeout=60)
    time.sleep(3)
    cov = self.chrome.Profiler.takePreciseCoverage()
    self.chrome.Profiler.disable()
    #res = c['result']['result']
    cov = Profiler().make_covdata_file(config.cov_data_path, cov, ["zyj"])
    report_file = config.cov_report_path
    with open(report_file, 'wb') as report:
        Profiler().make_profiler_report(stream=report, covdata=cov)
    self.driver.close()
    self.chrome.close()
Example #20
def main():
    model = Network()
    checkpoint = torch.load('/mnt/nvme/adas/code/custom_layer/checkpoint_8.pt')
    model.load_state_dict(checkpoint['model_state_dict'])

    P = Profiler(model, (4, 3, 512, 256))
    total_ops, total_params, total_mult, total_add = P.profile()

    print("=====================================================")
    print("Mult Ops: %f GOps" % (total_mult / 1e9))
    print("Add Ops: %f GOps" % (total_add / 1e9))
    print("Total Ops: %f GOps" % (total_ops / 1e9))
    print("Total Parameters: %f * 10^6 " % (total_params / 1e6))
Example #21
 def process(self):
     while 1:
         job = requests_job(FILE_PROCESS_TUBE)
         if job:
             try :
                 filename = job.body
                 profiler = Profiler('/tmp/%s' % (filename))
                 log1 = profiler.line_profiler()
                 log2 = profiler.memory_profiler()
                 self.redis_lib.set_value(log1, 1)
                 self.redis_lib.set_value(log2, 1)
                 job.delete()
             except Exception as ex:
                 job.delete()
Example #22
def main():
    # requested test cases per problem specs, plus cases used to determine optimization
    problem_sizes = [50, 160, 200, 275, 500, 5000]
    for problem_size in problem_sizes:
        lyst = []
        for count in range(problem_size):
            lyst.append(random.randint(1, problem_size + 1))
        # test and profile original quicksort
        print("Original quicksort: Problem Size = " + str(problem_size) +
              " integers.")
        print(
            "_______________________________________________________________________"
        )
        profiler = Profiler()
        profiler.test(quicksort, lyst=lyst, comp=True, exch=True)
        print("\n")
        # test and profile modified quicksort
        print("Modified quicksort: Problem Size = " + str(problem_size) +
              " integers.")
        print(
            "_______________________________________________________________________"
        )
        profiler = Profiler()
        profiler.test(mod_quicksort, lyst=lyst, comp=True, exch=True)
        print("\n")
Example #23
class TestProfiler2(TestUnitSetUp):
    def setUp(self):
        TestUnitSetUp.setUp(self)

        self.position_set = PositionSet.objects.get(id=65)
        self.date = self.position_set.filledorder_set.order_by('trade_summary__date') \
            .last().trade_summary.date.strftime('%Y-%m-%d')

        self.profiler = Profiler(position_set=self.position_set,
                                 date=self.date)

    def test_create_historical_positions(self):
        """
        Test create historical positions
        """
        print 'run create_historical_positions...'
        historical_positions = self.profiler.create_historical_positions()

        self.assertGreaterEqual(len(historical_positions), 1)

        print 'historical_positions:'
        for historical_position in historical_positions:
            print historical_position

            self.assertEqual(type(historical_position), PositionSet)
            self.assertEqual(historical_position.underlying.id,
                             self.position_set.underlying.id)

    def test_set_stocks(self):
        """
        Test set stocks from position_set date
        """
        print 'empty stocks:'
        print self.profiler.stocks

        print 'run set_stocks...'
        self.profiler.set_stocks()

        print 'stocks:'
        for stock in self.profiler.stocks:
            print stock.date, stock
            self.assertEqual(type(stock), Stock)
            self.assertEqual(stock.symbol, self.position_set.underlying.symbol)

        # make sure date is less than 1 bday of start_date
        self.assertEqual(self.profiler.stocks.last().date,
                         self.profiler.position_set.stop_date)
        self.assertNotEqual(self.profiler.stocks.first().date,
                            self.position_set.start_date)
Example #24
    def train(self):
        rnn_type = LSTM

        rnn_dropout = 0.0
        rnn_units = 128
        rnn_timesteps = 128
        rnn_features = 32

        output_size = 1

        batch_size = 512
        epochs = 10000000

        input_shape = (rnn_timesteps, rnn_features)

        if os.path.isfile(self.model_path):
            profiler = Profiler()
            model = load_model(self.model_path,
                               custom_objects={
                                   'root_mean_squared_error':
                                   root_mean_squared_error
                               })
            profiler.stop(f'Loaded model from "{self.model_path}".')
        else:
            model = Sequential()
            model.add(
                rnn_type(rnn_units,
                         dropout=rnn_dropout,
                         return_sequences=False,
                         input_shape=input_shape))
            model.add(Dense(output_size))
            model.add(Activation('tanh'))
            optimizer = Adam(lr=0.01)
            model.compile(optimizer=optimizer, loss=root_mean_squared_error)

        training_generator = SlidingWindowGenerator(self.x_training_wav,
                                                    self.y_training_wav,
                                                    input_shape, output_size,
                                                    batch_size)
        validation_generator = SlidingWindowGenerator(self.x_validation_wav,
                                                      self.y_validation_wav,
                                                      input_shape, output_size,
                                                      batch_size)
        save_callback = SaveCallback(self.model_path)
        history = model.fit_generator(generator=training_generator,
                                      epochs=epochs,
                                      verbose=1,
                                      validation_data=validation_generator,
                                      callbacks=[save_callback])
Example #25
def useShortestRouteFlow(flows):
	Profiler.start('useShortestRouteFlow')
	shortest_route_flow = {}

	for k, flow in flows.iteritems():
		shortest_route = None
		for route in flow.all_routes:
			if shortest_route is None or len(shortest_route) > len(route):
				shortest_route = route

		shortest_route_flow[k] = input_utils.Flow(flow.id, flow.src, flow.dst, flow.size, shortest_route, flow.all_routes)

	Profiler.end('useShortestRouteFlow')

	return shortest_route_flow
Example #26
def main(size=50000, sort=quicksort):

    lyst = []
    p = Profiler()
    for count in range(size):
        lyst.append(random.randint(1, size + 1))
    print(lyst)
    p.test(sort,
           lyst,
           size,
           unique=True,
           comp=True,
           exch=True,
           trace=False,
           mod=False)
Example #27
def perform_update(cx, suite, prefix, fetch):
    # Fetch the actual data.
    metadata = load_metadata(prefix)
    earliest_run_id = metadata['earliest_run_id']

    sys.stdout.write('Querying ' + prefix + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(earliest_run_id)
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' new rows in ' + diff)

    # Break everything into months.
    year = 0
    month = 0
    current = []
    months = []
    for row in rows:
        stamp = int(row[1]) - timezone_offset()
        t = time.gmtime(stamp)
        if t.tm_year != year or t.tm_mon != month:
            if year and len(current):
                months.append(((year, month), current))
            current = []
            year = t.tm_year
            month = t.tm_mon

        current.append(row)

    if len(current):
        months.append(((year, month), current))

    for when, data in months:
        name = prefix + '-' + str(when[0]) + '-' + str(when[1])

        sys.stdout.write('Updating cache for ' + name + '...')
        sys.stdout.flush()
        with Profiler() as p:
            update_cache(cx, suite, name, when, data)
            diff = p.time()
        print('took ' + diff)

    if len(rows):
        metadata['earliest_run_id'] = rows[-1][0]
        save_metadata(prefix, metadata)

    return new_rows
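
Note: the month-splitting loop above buckets consecutive rows by the
(year, month) of their timestamp while preserving order. The same grouping
in isolation, assuming rows shaped like (run_id, unix_stamp, ...):

import time

rows = [(1, 1428000000), (2, 1429000000), (3, 1432000000)]  # hypothetical

months = []
year = month = 0
current = []
for row in rows:
    t = time.gmtime(int(row[1]))
    if t.tm_year != year or t.tm_mon != month:
        if year and current:
            months.append(((year, month), current))
        current = []
        year, month = t.tm_year, t.tm_mon
    current.append(row)
if current:
    months.append(((year, month), current))

print([when for when, data in months])  # [(2015, 4), (2015, 5)]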
Example #28
def run_train_test(ds_name, metric, params, obj):
    path = _DATA_PATH + ds_name
    with Profiler('initial feature selection'):
        x_initial_raw, y_initial, _ = load_data(f'{path}/train.csv',
                                                mode='train',
                                                sample=_SAMPLE)
        x_initial, ini_params = initial_processing(x_initial_raw, mode='train')

        tf = CatTransformer(ini_params['cat_cols'])
        # tf.fit(x_initial)
        x_initial_tf = tf.fit_transform(x_initial)
        selected_features, feat_list = ols_selection(x_initial_tf, y_initial,
                                                     obj)
        hp_params = hyperopt_lgb(x_initial_tf[feat_list], y_initial, params,
                                 obj)

    print('selected features=', len(selected_features))

    x_train_raw, y_train, _ = load_data(f'{path}/train.csv',
                                        mode='train',
                                        sample=_SAMPLE,
                                        used_cols=selected_features)

    x_test_raw, _, _ = load_data(f'{path}/test.csv', mode='test')
    y_test = load_test_label(f'{path}/test-target.csv')

    x_train, train_params = initial_processing(x_train_raw, mode='train')
    x_test, test_params = initial_processing(x_test_raw, mode='test')

    with Profiler('fit transform cat columns'):
        x_test_rein = x_test.reindex(columns=train_params['used_cols'])
        tf = CatTransformer(train_params['cat_cols'])
        tf.fit(x_train)
        x_train_tf = tf.transform(x_train)
        x_test_tf = tf.transform(x_test_rein)

    with Profiler('run train'):
        model = lgb.train(hp_params, lgb.Dataset(x_train_tf, label=y_train),
                          600)

    with Profiler('predict'):
        y_train_out = model.predict(x_train_tf)
        y_test_out = model.predict(x_test_tf)

    train_err = metric(y_train, y_train_out)
    test_err = metric(y_test, y_test_out)

    return train_err, test_err
Example #29
class TestProfiler2(TestUnitSetUp):
    def setUp(self):
        TestUnitSetUp.setUp(self)

        self.position_set = PositionSet.objects.get(id=65)
        self.date = (
            self.position_set.filledorder_set.order_by("trade_summary__date")
            .last()
            .trade_summary.date.strftime("%Y-%m-%d")
        )

        self.profiler = Profiler(position_set=self.position_set, date=self.date)

    def test_create_historical_positions(self):
        """
        Test create historical positions
        """
        print "run create_historical_positions..."
        historical_positions = self.profiler.create_historical_positions()

        self.assertGreaterEqual(len(historical_positions), 1)

        print "historical_positions:"
        for historical_position in historical_positions:
            print historical_position

            self.assertEqual(type(historical_position), PositionSet)
            self.assertEqual(historical_position.underlying.id, self.position_set.underlying.id)

    def test_set_stocks(self):
        """
        Test set stocks from position_set date
        """
        print "empty stocks:"
        print self.profiler.stocks

        print "run set_stocks..."
        self.profiler.set_stocks()

        print "stocks:"
        for stock in self.profiler.stocks:
            print stock.date, stock
            self.assertEqual(type(stock), Stock)
            self.assertEqual(stock.symbol, self.position_set.underlying.symbol)

        # make sure date is less than 1 bday of start_date
        self.assertEqual(self.profiler.stocks.last().date, self.profiler.position_set.stop_date)
        self.assertNotEqual(self.profiler.stocks.first().date, self.position_set.start_date)
Example #30
    def test_create_opinion_button(self):
        """
        Test create opinion button
        """
        print "run create_opinion_button..."
        opinion_button = self.profiler.create_opinion_button()

        print "opinion_button:"
        print opinion_button

        self.assertEqual(type(opinion_button), dict)
        self.assertTrue(opinion_button["saved"])
        self.assertEqual(opinion_button["object"].id, self.position_opinion.id)

        print "\n" + "." * 60 + "\n"
        print "test false..."
        print "\n" + "." * 60 + "\n"

        print "run create_opinion_button..."
        self.profiler = Profiler(self.position_set, date="2015-04-09")
        opinion_button = self.profiler.create_opinion_button()

        self.assertEqual(type(opinion_button), dict)
        self.assertFalse(opinion_button["saved"])

        print "opinion_button:"
        print opinion_button
Example #31
def test_class_profiler_should_calculate_total_acceleration_with_drag_included(
):
    parameters = {
        'body_d': 0.50465,
        'body_l': 1.0,
        'piston_d': 0.0,
        'piston_l': 0.0,
        'density': 1025,
        'depth': 0.0,
        'velocity': 1.0,
        'mass': 50.0
    }

    expected_f_drag = 84.05

    f_buoy = 2010.55
    f_grav = 490.33

    f_total = f_buoy - (expected_f_drag + f_grav)

    p = Profiler(**parameters)
    print(p.acceleration)
    assert np.isclose(expected_f_drag,
                      p.drag.drag,
                      rtol=1e-4,
                      atol=1e-6,
                      equal_nan=False)
Example #32
def test_class_profiler_should_calculate_total_acceleration_correctly_balanced_forces(
):
    # Testing to see acceleration = 0 when F_gravity == F_buoyancy

    parameters = {
        'body_d': 0.50465,
        'piston_d': 0.0,
        'piston_l': 0.0,
        'density': 1025,
        'depth': 0.0,
        'velocity': 0.0,
        'mass': 50.0
    }

    # f_b  - f_g = 0
    # f_g = m * g
    # f_b = V * r * g

    # f_b = m * acc_b
    # m * acc_b - m * g = 0
    # acc_b - g = 0
    f_g = parameters['mass'] * g

    volume = parameters['mass'] / parameters['density']
    area = ((parameters['body_d'] / 2)**2) * np.pi
    length = volume / area

    parameters['body_l'] = length

    p = Profiler(**parameters)

    assert 0 == p.acceleration
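
Note: working the comments through with numbers: body_l is chosen so the
displaced water mass equals the body mass, which makes F_buoy equal F_grav
(g is the module-level constant the test imports; 9.81 is assumed here):

import numpy as np

g = 9.81  # assumed value of the test's imported g
mass, density, body_d = 50.0, 1025, 0.50465

volume = mass / density              # 0.04878 m^3 displaces exactly 50 kg
area = (body_d / 2) ** 2 * np.pi     # 0.20002 m^2 cross-section
length = volume / area               # 0.2439 m, the body_l the test sets

f_g = mass * g                       # 490.5 N
f_b = volume * density * g           # 490.5 N
print(f_b - f_g)                     # ~0.0, so acceleration is 0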
Example #33
def test_class_profiler_should_initialize_with_given_parameters():
    parameters = {
        'body_d': 0.4,
        'body_l': 1.0,
        'piston_d': 0.1,
        'piston_l': 0.3,
        'density': 1023.2,
        'depth': 0.0,
        'velocity': 0.0,
        'mass': 12.2
    }

    p = Profiler(**parameters)

    expected_body_volume = 0.12566
    expected_piston_volume = 0.002356

    assert np.isclose(expected_body_volume,
                      p.body.volume,
                      rtol=1e-3,
                      atol=1e-4)
    assert np.isclose(expected_piston_volume,
                      p.piston.volume,
                      rtol=1e-3,
                      atol=1e-4)
    assert parameters['density'] == p.water.density
    assert parameters['depth'] == p.water.depth
    assert parameters['velocity'] == p.velocity
    assert parameters['mass'] == p._mass
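
Note: the expected volumes in this test are just the cylinder formula
V = pi * (d / 2)^2 * l applied to the body and piston parameters:

import numpy as np

body_volume = np.pi * (0.4 / 2) ** 2 * 1.0    # ~0.12566, as asserted
piston_volume = np.pi * (0.1 / 2) ** 2 * 0.3  # ~0.002356, as asserted
print(body_volume, piston_volume)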
Example #34
def renew_cache(cx, machine, suite, prefix, when, last_stamp, fetch):
    delete_cache(prefix + '-' + str(when[0]) + '-' + str(when[1]))

    # Delete corresponding condensed graph
    if prefix[0:3] == "raw":
        delete_cache("condensed" + prefix[3:] + '-' + str(when[0]) + '-' +
                     str(when[1]))
    else:
        delete_cache("bk-condensed" + prefix[6:] + '-' + str(when[0]) + '-' +
                     str(when[1]))

    dt = datetime.datetime(year=when[0], month=when[1], day=1)
    start_stamp = int(time.mktime(dt.timetuple()))

    next_month = when[1] + 1
    next_year = when[0]
    if next_month == 13:
        next_month = 1
        next_year += 1
    dt = datetime.datetime(year=next_year, month=next_month, day=1)
    stop_stamp = int(time.mktime(dt.timetuple())) - 1
    if last_stamp < stop_stamp:
        stop_stamp = last_stamp

    # Querying all information from this month.
    sys.stdout.write('Querying ' + prefix + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(machine, test_stamp=(start_stamp, stop_stamp))
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' rows in ' + diff)

    name = prefix + '-' + str(when[0]) + '-' + str(when[1])
    update_cache(cx, suite, name, when, rows)
Example #35
def renew_cache(cx, machine, suite, prefix, when, fetch):
    delete_cache(prefix + '-' + str(when[0]) + '-' + str(when[1]))

    # Delete corresponding condensed graph
    before, after = prefix.split("raw", 1)
    delete_cache(before + "condensed" + after + '-' + str(when[0]) + '-' + str(when[1]));

    dt = datetime.datetime(year=when[0], month=when[1], day=1)
    start_stamp = int(time.mktime(dt.timetuple()))

    next_month = when[1] + 1
    next_year = when[0]
    if next_month == 13:
        next_month = 1
        next_year += 1
    dt = datetime.datetime(year=next_year, month=next_month, day=1)
    stop_stamp = int(time.mktime(dt.timetuple())) - 1

    name = prefix + '-' + str(when[0]) + '-' + str(when[1])

    # Querying all information from this month.
    sys.stdout.write('Fetching monthly info ' + name + '... ')
    sys.stdout.flush()
    with Profiler() as p:
        rows = fetch(machine, approx_stamp=(start_stamp,stop_stamp))
        diff = p.time()
    new_rows = len(rows)
    print('found ' + str(new_rows) + ' rows in ' + diff)

    update_cache(cx, suite, name, when, rows) 
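
Note: both renew_cache variants derive a month's timestamp range by taking
the first day of the month and the first day of the next month, with a
year rollover when the month wraps past December:

import datetime
import time

def month_bounds(year, month):
    start = int(time.mktime(datetime.datetime(year, month, 1).timetuple()))
    next_month, next_year = month + 1, year
    if next_month == 13:
        next_month, next_year = 1, next_year + 1
    stop = int(time.mktime(
        datetime.datetime(next_year, next_month, 1).timetuple())) - 1
    return start, stop

print(month_bounds(2015, 12))  # stop falls one second before 2016-01-01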
Example #36
 def __init__(self):
     self.conf_space = ConfSpace()
     self.semantics = self.conf_space.hadoop_semantics
     self.profiler = Profiler()
     # self.predictor = PerfPredict()
     self.profile_num = 0  # initial profiling index
     # self.performance_track = []
     # self.K_iters_for_convergence_test = 5   # check the last 5 iterations
     # self.perf_improvement_threshold = 0.02   # 2% of the improvement on the last cycle
     self.best_conf = None
     self.global_improvement = 0.5
     self.max_iter = 6000
     self.initial_perf = sys.maxsize
     self.type_checker = HadoopConfChecker()
     self.type_checker.set_all_param_value(self.conf_space.param_values)
     self.invalid_confs = []
     self.curr_genconf_folder = cfg.gen_confs + os.sep + 'conf'
Example #37
    def __init__(self, init_state: tuple):
        self.init_state = init_state

        # saves states tuples, uses a double ended queue;
        # can behave both as a FIFO queue and as a LIFO stack
        self.fringe = deque()

        # saves states tuples; uses a set for speed when checking if a state
        # has been explored
        self.explored = set()

        # saves nodes details for every node state:
        # nodeState -> (nodeParent, action, cost, depth)
        self.node_db = dict()

        self.profiler = Profiler()
        self.set_goal()
Example #38
    def __init__(self, library, plan, attributes, world = None):
        """Initialises the agent to use the given library and plan.
        
        The plan has to be given as the plan name without the '.lap' extension.
        The attributes are the ones that are assigned to the behaviours
        when they are initialised. The world is the one that can be accessed
        by the behaviours by the L{AgentBase.getWorld} method.
        
        Note that when the behaviours are loaded from the given library, then
        they are reflected onto the agent object. That means, given that
        there is a behaviour called 'bot', then it can be accessed from another
        behaviour either by self.agent.getBehaviour("bot"), or by
        self.agent.bot. Consequently, behaviour names that clash with already
        existing agent attributes cause an AttributeError to be raised upon
        initialising the behaviours.
        
        The attributes are to be given in the same format as for the
        method L{AgentBase.assignAttributes}.
        
        @param library: The behaviour library to use.
        @type library: string
        @param plan: The plan to use (without the '.lap' ending).
        @type plan: string
        @param attributes: The attributes to be assigned to the behaviours
        @type attributes: as for L{AgentBase.assignAttributes}
        """
        # get unique id for agent first, as constructor of LogBase accesses it
        self.id = unique_agent_id()
        LogBase.__init__(self, self, "")
        # store library for use when spawning new agents
        self._library = library
        self._world = world
        # we need to set the random number generator before we
        # load the behaviours, as they might access it upon
        # construction
        self.random = random
        # if you are profiling, you need to fix this in your init_world.  see library/latchTest for an example & documentation
        # do this before loading Behaviours
        self.profiler = Profiler.initProfile(self)
        # load and register the behaviours, and reflect back onto agent
        self._bdict = self._loadBehaviours()
        self._reflectBehaviours()
        # more for the profiler
        # FIXME: PR -- is there another place to do this?  will it succeed without MASON? JJB 1 March 2008
        try:
            self.profiler.set_second_name(self._bdict._behaviours['MASON'].name)
        except:
            # may want this for debugging:  print "profiler is off and/or MASON is not being used"
            pass # normally don't expect profiling, nor necessarily MASON
 
        # assign the initial attributes to the behaviours
        self.assignAttributes(attributes)
        # load the plan
        self._loadPlan(get_plan_file(library, plan))
        # loop thread control
        self._exec_loop = False
        self._loop_pause = False
Example #39
def testPlanner(planner,numTrials,maxTime,filename):    
    print "Testing planner for %d trials, %f seconds"%(numTrials,maxTime)
    print "Saving to",filename
    f = open(filename,'w')
    f.write("trial,plan iters,plan time,best cost\n")
    for trial in range(numTrials):
        print
        print "Trial",trial+1
        planner.reset()
        curCost = float('inf')
        t0 = time.time()
        numupdates = 0
        iters = 0
        hadException = False
        while time.time()-t0 < maxTime:
            try:
                planner.planMore(10)
            except Exception as e:
                if hadException:
                    print "Warning, planner raise two exceptions in a row. Quitting"
                    break
                else:
                    print "Warning, planner raised an exception... soldiering on"
                    print e
                    hadException = True
                    continue
            iters += 10
            if planner.bestPathCost != None and planner.bestPathCost != curCost:
                numupdates += 1
                curCost = planner.bestPathCost
                t1 = time.time()
                f.write(str(trial)+","+str(iters)+","+str(t1-t0)+","+str(curCost)+'\n')
        if hasattr(planner,'stats'):
            print
            temp = Profiler()
            temp.items["Stats:"] = planner.stats
            temp.pretty_print()
        print
        print "Final cost:",curCost
        print

        f.write(str(trial)+","+str(iters)+","+str(maxTime)+","+str(curCost)+'\n')
    f.close()
Example #40
    def setUp(self):
        TestUnitSetUp.setUp(self)

        self.position_set = PositionSet.objects.get(id=65)
        self.date = (
            self.position_set.filledorder_set.order_by("trade_summary__date")
            .last()
            .trade_summary.date.strftime("%Y-%m-%d")
        )

        self.profiler = Profiler(position_set=self.position_set, date=self.date)
Example #41
class Benchmark:

    def __init__(self, benchmarkGroupId, benchmarkRunId, buildSettings, **kwargs):
        if(kwargs.has_key("remote") and kwargs["remote"]==True and (kwargs.has_key("dirBinary") or kwargs.has_key("hyriseDBPath"))):
            print "dirBinary and hyriseDBPath cannot be used with remote"
            exit()

        self._pid               = os.getpid()
        self._id                = benchmarkGroupId
        self._runId             = benchmarkRunId
        self._buildSettings     = buildSettings
        self._userClass         = kwargs["userClass"] if kwargs.has_key("userClass") else user.User
        self._numUsers          = kwargs["numUsers"] if kwargs.has_key("numUsers") else 1
        self._mysqlDB           = kwargs["mysqlDB"] if kwargs.has_key("mysqlDB") else "cbtr"
        self._mysqlHost         = kwargs["mysqlHost"] if kwargs.has_key("mysqlHost") else "vm-hyrise-jenkins.eaalab.hpi.uni-potsdam.de"
        self._mysqlPort         = kwargs["mysqlPort"] if kwargs.has_key("mysqlPort") else 3306
        self._mysqlUser         = kwargs["mysqlUser"] if kwargs.has_key("mysqlUser") else "hyrise"
        self._mysqlPass         = kwargs["mysqlPass"] if kwargs.has_key("mysqlPass") else "hyrise"
        self._papi              = kwargs["papi"] if kwargs.has_key("papi") else "NO_PAPI"
        self._prepQueries       = kwargs["prepareQueries"] if kwargs.has_key("prepareQueries") else queries.QUERIES_PREPARE
        self._prepArgs          = kwargs["prepareArgs"] if kwargs.has_key("prepareArgs") else {"db": "cbtr"}
        self._queries           = kwargs["benchmarkQueries"] if kwargs.has_key("benchmarkQueries") else queries.QUERIES_ALL
        self._host              = kwargs["host"] if kwargs.has_key("host") else "127.0.0.1"
        self._port              = kwargs["port"] if kwargs.has_key("port") else 5000
        self._warmuptime        = kwargs["warmuptime"] if kwargs.has_key("warmuptime") else 0
        self._runtime           = kwargs["runtime"] if kwargs.has_key("runtime") else 5
        self._thinktime         = kwargs["thinktime"] if kwargs.has_key("thinktime") else 0
        self._manual            = kwargs["manual"] if kwargs.has_key("manual") else False
        self._rebuild           = kwargs["rebuild"] if kwargs.has_key("rebuild") else False
        self._userArgs          = kwargs["userArgs"] if kwargs.has_key("userArgs") else {"queries": self._queries}
        self._stdout            = kwargs["showStdout"] if kwargs.has_key("showStdout") else False
        self._stderr            = kwargs["showStderr"] if kwargs.has_key("showStderr") else True
        self._remote            = kwargs["remote"] if kwargs.has_key("remote") else False
        self._dirBinary         = kwargs["dirBinary"] if kwargs.has_key("dirBinary") else os.path.join(os.getcwd(), "builds/%s" % buildSettings.getName())
        self._dirHyriseDB       = kwargs["hyriseDBPath"] if kwargs.has_key("hyriseDBPath") else self._dirBinary
        self._dirResults        = os.path.join(os.getcwd(), "results", self._id, self._runId, buildSettings.getName())
        # self._queryDict         = self._readDefaultQueryFiles()
        self._queryDict         = {}
        self._session           = requests.Session()
        self._serverThreads     = kwargs["serverThreads"] if kwargs.has_key("serverThreads") else 0
        self._collectPerfData   = kwargs["collectPerfData"] if kwargs.has_key("collectPerfData") else False
        self._useJson           = kwargs["useJson"] if kwargs.has_key("useJson") else False
        self._build             = None
        self._serverProc        = None
        self._users             = []
        self._scheduler         = kwargs["scheduler"] if kwargs.has_key("scheduler") else "PartitionedQueuesScheduler"
        self._serverIP          = kwargs["serverIP"] if kwargs.has_key("serverIP") else "127.0.0.1"
        self._remoteUser        = kwargs["remoteUser"] if kwargs.has_key("remoteUser") else "hyrise"
        self._remotePath        = kwargs["remotePath"] if kwargs.has_key("remotePath") else "/home/" + kwargs["remoteUser"] + "/benchmark"
        self._abQueryFile       = kwargs["abQueryFile"] if kwargs.has_key("abQueryFile") else None
        self._abCore            = kwargs["abCore"] if kwargs.has_key("abCore") else 2
        self._verbose           = kwargs["verbose"] if kwargs.has_key("verbose") else 1
        self._write_to_file     = kwargs["write_to_file"] if kwargs.has_key("write_to_file") else None
        self._write_to_file_count = kwargs["write_to_file_count"] if kwargs.has_key("write_to_file_count") else None
        self._checkpoint_interval = str(kwargs["checkpointInterval"]) if kwargs.has_key("checkpointInterval") else None
        self._commit_window     = str(kwargs["commitWindow"]) if kwargs.has_key("commitWindow") else None
        self._csv                = kwargs["csv"] if kwargs.has_key("csv") else False
        self._nodes             = kwargs["nodes"] if kwargs.has_key("nodes") else None
        self._vtune             = os.path.expanduser(kwargs["vtune"]) if kwargs.has_key("vtune") and kwargs["vtune"] is not None else None
        self._with_profiler     = kwargs["profiler"] if kwargs.has_key("profiler") else None
        self._profiler = None
        self._memNode           = kwargs["memNode"] if kwargs.has_key("memNode") else None

        if self._vtune is not None:
            self._manual = True        
        if self._remote:
            self._ssh               = paramiko.SSHClient()
        else:
            self._ssh           = None
        self._exiting           = False

        self._session.headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        if not os.path.isdir(self._dirResults):
            os.makedirs(self._dirResults)

    def benchPrepare(self):
        """ implement this in subclasses """
        pass

    def loadTables(self):
        """ implement this in subclasses """
        pass

    def benchAfterLoad(self):
        """ implement this in subclasses """
        pass

    def benchBeforeStop(self):
        """ implement this in subclasses """
        pass

    def benchAfter(self):
        """ implement this in subclasses """
        pass

    def preexec(self): # Don't forward signals.
        os.setpgrp()

    def allUsersFinished(self):
        for user in self._users:
            if user.is_alive():
                return False
        print "All users have terminated."
        return True

    def run(self):
        filename = self._dirResults + "/ab.log"
        i = 1
        while not os.path.isfile(filename):
            print "Starting benchmark try", i, "for file", filename
            self.run_real()
            i = i + 1

    def run_real(self):

        try:
            signal.signal(signal.SIGINT, self._signalHandler)
        except:
            print "Could not add signal handler."

        if self._with_profiler is not None:        
            self._profiler = Profiler(self._dirBinary)
            self._profiler.setup(self._with_profiler)

        print "+------------------+"
        print "| HYRISE benchmark |"
        print "+------------------+\n"

        if self._remote:
            subprocess.call(["mkdir", "-p", "remotefs/" + self._host])
            subprocess.call(["fusermount", "-u", "remotefs/127.0.0.1"])
            subprocess.Popen(["sshfs", self._remoteUser + "@" + self._host + ":" + self._remotePath, "remotefs/" + self._host + "/"], preexec_fn = self.preexec)
            self._olddir = os.getcwd()
            os.chdir("remotefs/" + self._host + "/")
            self._dirBinary         = os.path.join(os.getcwd(), "builds/%s" % self._buildSettings.getName())
            self._dirHyriseDB       = os.path.join(os.getcwd(), "hyrise")
            self._startSSHConnection()


        if not self._manual:
            # no support for building on remote machine yet
            self._buildServer()
            if self._abQueryFile != None:
                self._buildAb()
            self._startServer()
            print "---\nHYRISE server running on port %s\n---" % self._port
        else:
            print "---\nManual mode, expecting HYRISE server running on port %s\n---" % self._port

        self._runPrepareQueries()

        print "Preparing benchmark..."
        self.benchPrepare()
        self.loadTables()
        self.benchAfterLoad()

        if self._vtune is not None:
            subprocess.check_output("amplxe-cl -command resume", cwd=self._vtune, shell=True)

        if self._with_profiler is not None:        
            print "---\n"
            self._profiler.start(str(self._serverProc.pid))

        if self._runtime > 0:
            if self._abQueryFile != None:
                print "---"
                print "Using ab with queryfile=" + self._abQueryFile + ", concurrency=" + str(self._numUsers) + ", time=" + str(self._runtime) +"s"
                print "Output File: ", self._dirResults + "/ab.log"
                print "---"
                ab = subprocess.Popen(["./ab/ab","-g", self._dirResults + "/ab.log", "-l", str(self._abCore), "-v", str(self._verbose), "-k", "-t", str(self._runtime), "-n", "99999999", "-c", str(self._numUsers), "-m", self._abQueryFile, self._host+":"+str(self._port)+"/procedure/"])
                ab.wait()
            else:
                self._createUsers()
                sys.stdout.write("Starting %s user(s)...\r" % self._numUsers)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    sys.stdout.write("Starting %s user(s)... %i%%      \r" % (self._numUsers, (i+1.0) / self._numUsers * 100))
                    sys.stdout.flush()
                    self._users[i].start()
                print "Starting %s user(s)... done     " % self._numUsers

                for i in range(self._warmuptime):
                    sys.stdout.write("Warming up... %i   \r" % (self._warmuptime - i))
                    sys.stdout.flush()
                    if self.allUsersFinished():
                        break
                    time.sleep(1)
                print "Warming up... done     "

                sys.stdout.write("Logging results for %i seconds... \r" % self._runtime)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    self._users[i].startLogging()
                for i in range(self._runtime):
                    sys.stdout.write("Logging results for %i seconds... \r" % (self._runtime - i))
                    sys.stdout.flush()
                    if self.allUsersFinished():
                        break
                    time.sleep(1)
                #time.sleep(self._runtime)
                for i in range(self._numUsers):
                    self._users[i].stopLogging()
                print "Logging results for %i seconds... done" % self._runtime

                sys.stdout.write("Stopping %s user(s)...\r" % self._numUsers)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    self._users[i].stop()
                print "users stopped"
                time.sleep(2)
                for i in range(self._numUsers):
                    sys.stdout.write("Stopping %s user(s)... %i%%      \r" % (self._numUsers, (i+1.0) / self._numUsers * 100))
                    sys.stdout.flush()
                    self._users[i].join()
                print "Stopping %s user(s)... done     " % self._numUsers
        if self._vtune is not None:
            subprocess.check_output("amplxe-cl -command stop", cwd=self._vtune, shell=True)
        self.benchBeforeStop()
        self._stopServer()


        if self._with_profiler is not None:
            print "---\n"
            self._profiler.end()

        print "all set"

        if self._remote:
            os.chdir(self._olddir)

        self.benchAfter()


    def addQuery(self, queryId, queryStr):
        if self._queryDict.has_key(queryId):
            raise Exception("a query with id '%s' is already registered" % queryId)
        else:
            self._queryDict[queryId] = queryStr

    def addQueryFile(self, queryId, filename):
        if self._queryDict.has_key(queryId):
            raise Exception("a query with id '%s' is already registered" % queryId)
        else:
            self._queryDict[queryId] = open(filename).read()

    def setUserClass(self, userClass):
        self._userClass = userClass

    def setUserArgs(self, args):
        self._userArgs = args

    def fireQuery(self, queryString, queryArgs={}, sessionContext=None, autocommit=False):
        query = queryString % queryArgs
        data = {"query": query}
        if sessionContext: data["sessionContext"] = sessionContext
        if autocommit: data["autocommit"] = "true"
        return self._session.post("http://%s:%s/" % (self._host, self._port), data=data)

    # def _readDefaultQueryFiles(self):
    #     cwd = os.getcwd()
    #     queryDict = {}
    #     for queryId, filename in queries.QUERY_FILES.iteritems():
    #         queryDict[queryId] = open(os.path.join(cwd, filename)).read()
    #     return queryDict

    def _buildAb(self):
        sys.stdout.write("Building ab tool... ")
        sys.stdout.flush()
        process = subprocess.Popen("make ab", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd="./ab")
        (stdout, stderr) = process.communicate()
        returncode = process.returncode
        if returncode != 0:
            print stderr
            raise Exception("ERROR: building ab tool failed with return code %s:\n===\n%s" % (self._settings.getName(), returncode, stderr))
        else:
            print "done"

    def _buildServer(self):
        sys.stdout.write("%suilding server for build '%s'... " % ("B" if not self._rebuild else "Reb", self._buildSettings.getName()))
        sys.stdout.flush()
        if self._build == None:
            self._build = build.Build(settings=self._buildSettings, ssh=self._ssh, remotePath = self._remotePath)
            if self._rebuild:
                self._build.makeClean()
            self._build.makeAll()
        elif self._rebuild:
            self._build.makeClean()
            self._build.makeAll()
        print "done"

    def _startServer(self, paramString=""):
        if not self._remote:
            sys.stdout.write("Starting server for build '%s'... " % self._buildSettings.getName())
            sys.stdout.flush()

            env = {
                "HYRISE_DB_PATH"    : self._dirHyriseDB,
                "LD_LIBRARY_PATH"   : self._dirBinary+":/usr/local/lib64/",
                "HYRISE_MYSQL_PORT" : str(self._mysqlPort),
                "HYRISE_MYSQL_HOST" : self._mysqlHost,
                "HYRISE_MYSQL_USER" : self._mysqlUser,
                "HYRISE_MYSQL_PASS" : self._mysqlPass
            }
            if self._buildSettings.oldMode():
                server = os.path.join(self._dirBinary, "hyrise_server")
            else:
                server = os.path.join(self._dirBinary, "hyrise-server_%s" % self._buildSettings["BLD"])

            #server = os.path.join(self._dirBinary, "hyrise-server_debug")

            logdef = os.path.join(self._dirBinary, "log.properties")
            threadstring = ""
            if (self._serverThreads > 0):
                threadstring = "--threads=%s" % self._serverThreads

            checkpoint_str = ""
            if (self._checkpoint_interval != None):
                checkpoint_str = "--checkpointInterval=%s" % self._checkpoint_interval

            commit_window_str = ""
            if (self._commit_window != None):
                commit_window_str = "--commitWindow=%s" % self._commit_window

            nodes_str = ""
            if (self._nodes != None):
                nodes_str = "--nodes=%s" % self._nodes
            
            mem_node_str = ""
            if (self._memNode != None):
                mem_node_str = "--memorynode=%s" % self._memNode

            self._serverProc = subprocess.Popen([server, "--port=%s" % self._port, "--logdef=%s" % logdef, "--scheduler=%s" % self._scheduler, nodes_str, checkpoint_str, threadstring, commit_window_str, mem_node_str],
                                                cwd=self._dirBinary,
                                                env=env,
                                                stdout=open("/dev/null") if not self._stdout else None,
                                                stderr=open("/dev/null") if not self._stderr else None)
        else:
            self._startRemoteServer()

        time.sleep(1)
        print "done"


    def _startRemoteServer(self):
        print("Starting server for build '%s'... remotely on '%s'" % (self._buildSettings.getName(), self._host))

        env = "HYRISE_DB_PATH="+str(self._dirHyriseDB)+\
              " LD_LIBRARY_PATH="+str(self._dirBinary)+":/usr/local/lib64/"+\
              " HYRISE_MYSQL_PORT="+str(self._mysqlPort)+\
              " HYRISE_MYSQL_HOST="+str(self._mysqlHost)+\
              " HYRISE_MYSQL_USER="******" HYRISE_MYSQL_PASS="******"hyrise_server")
        else:
            server = os.path.join(self._dirBinary, "hyrise-server_%s" % self._buildSettings["BLD"])

        logdef = os.path.join(self._dirBinary, "log.properties")

        threadstring = ""

        if (self._serverThreads > 0):
            threadstring = "--threads=%s" % self._serverThreads

        # note: there is an issue with large outputs of the server command;
        # the remote command hangs, probably when the channel buffer is full
        # either write to /dev/null on server machine of a file on server side
        # otherwise, get the transport and read from a channel
        command_str = "cd " + str(self._dirBinary) + "; env " + env + " " + server + " --port=%s" % self._port + " --logdef=%s" % logdef + " --scheduler=%s" % self._scheduler + " " + threadstring + " 2&>1 &> ~/hyriselog"
        command_str = command_str.replace(os.path.join(os.getcwd()), self._remotePath)
        stdin, stdout, stderr = self._ssh.exec_command(command_str)

        time.sleep(1)
        print "done"


    def _runPrepareQueries(self):
        if self._prepQueries == None or len(self._prepQueries) == 0:
            return
        numQueries = len(self._prepQueries)
        for i, q in enumerate(self._prepQueries):
            sys.stdout.write("Running prepare queries... %i%%      \r" % ((i+1.0) / numQueries * 100))
            sys.stdout.flush()
            try:
                self.fireQuery(self._queryDict[q], self._prepArgs)
            except Exception as e:
                print "Running prepare queries... %i%% --> Error: %s" % ((i+1.0) / numQueries * 100, e)
        print "Running prepare queries... done"



    def _createUsers(self):
        for i in range(self._numUsers):
            self._users.append(self._userClass(
                userId=i, host=self._host, port=self._port,
                dirOutput=self._dirResults, queryDict=self._queryDict,
                collectPerfData=self._collectPerfData, useJson=self._useJson,
                write_to_file=self._write_to_file,
                write_to_file_count=self._write_to_file_count,
                **self._userArgs))

    def _stopServer(self):
        if not self._remote:
            if not self._manual and self._serverProc:
                sys.stdout.write("Stopping server... ")
                sys.stdout.flush()
                self._serverProc.terminate()
                time.sleep(0.5)
                if self._serverProc.poll() is None:
                    #print "Server still running. Waiting 2 sec and forcing shutdown..."
                    time.sleep(2)
                    self._serverProc.kill()
                time.sleep(0.5)
                if self._serverProc.poll() is None:
                    subprocess.call(["killall", "-u", os.getlogin(), "hyrise-server_release"])
                time.sleep(5)
        else:
            print "kill server, close connection"
            self._ssh.exec_command("killall hyrise-server_release");
            self._stopSSHConnection()
        print "done."

    def _signalHandler(self, signal, frame):
        if os.getppid() == self._pid or self._exiting:
            return
        self._exiting = True
        print "\n*** received SIGINT, initiating graceful shutdown"
        if self._build:
            self._build.unlink()
        for u in self._users:
            u.stopLogging()
            u.stop()
        self._stopServer()
        for u in self._users:
            u.join()
        exit()

    def _startSSHConnection(self):
        self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # expects authentication per key on remote server
        self._ssh.connect(self._host, username=self._remoteUser)
        print "connected"

    def _stopSSHConnection(self):
        self._ssh.close()
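
The channel-buffer hang noted in _startRemoteServer is a known pitfall with paramiko's exec_command: if nobody drains stdout/stderr, a chatty remote command can block once the channel buffer fills. For finite commands (a long-running server still needs the file redirect used above), a minimal sketch of the drain-the-channel alternative the comment mentions, assuming an already connected paramiko.SSHClient:

import paramiko  # assumed dependency; `ssh` below is a connected SSHClient

def exec_and_drain(ssh, command):
    """Run `command` over SSH, consuming output as it arrives so the
    channel buffer can never fill up and stall the remote process."""
    stdin, stdout, stderr = ssh.exec_command(command)
    output = []
    for line in stdout:  # reading drains the channel as data arrives
        output.append(line)
    status = stdout.channel.recv_exit_status()  # blocks until command exits
    return status, "".join(output)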
Example #42
    def run_real(self):

        try:
            signal.signal(signal.SIGINT, self._signalHandler)
        except Exception:
            print "Could not add signal handler."

        if self._with_profiler is not None:        
            self._profiler = Profiler(self._dirBinary)
            self._profiler.setup(self._with_profiler)

        print "+------------------+"
        print "| HYRISE benchmark |"
        print "+------------------+\n"

        if self._remote:
            subprocess.call(["mkdir", "-p", "remotefs/" + self._host])
            subprocess.call(["fusermount", "-u", "remotefs/127.0.0.1"])
            subprocess.Popen(["sshfs", self._remoteUser + "@" + self._host + ":" + self._remotePath, "remotefs/" + self._host + "/"], preexec_fn = self.preexec)
            self._olddir = os.getcwd()
            os.chdir("remotefs/" + self._host + "/")
            self._dirBinary         = os.path.join(os.getcwd(), "builds/%s" % self._buildSettings.getName())
            self._dirHyriseDB       = os.path.join(os.getcwd(), "hyrise")
            self._startSSHConnection()


        if not self._manual:
            # no support for building on remote machine yet
            self._buildServer()
            if self._abQueryFile is not None:
                self._buildAb()
            self._startServer()
            print "---\nHYRISE server running on port %s\n---" % self._port
        else:
            print "---\nManual mode, expecting HYRISE server running on port %s\n---" % self._port

        self._runPrepareQueries()

        print "Preparing benchmark..."
        self.benchPrepare()
        self.loadTables()
        self.benchAfterLoad()

        if self._vtune is not None:
            subprocess.check_output("amplxe-cl -command resume", cwd=self._vtune, shell=True)

        if self._with_profiler is not None:        
            print "---\n"
            self._profiler.start(str(self._serverProc.pid))

        if self._runtime > 0:
            if self._abQueryFile is not None:
                print "---"
                print "Using ab with queryfile=" + self._abQueryFile + ", concurrency=" + str(self._numUsers) + ", time=" + str(self._runtime) +"s"
                print "Output File: ", self._dirResults + "/ab.log"
                print "---"
                ab = subprocess.Popen(["./ab/ab","-g", self._dirResults + "/ab.log", "-l", str(self._abCore), "-v", str(self._verbose), "-k", "-t", str(self._runtime), "-n", "99999999", "-c", str(self._numUsers), "-m", self._abQueryFile, self._host+":"+str(self._port)+"/procedure/"])
                ab.wait()
            else:
                self._createUsers()
                sys.stdout.write("Starting %s user(s)...\r" % self._numUsers)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    sys.stdout.write("Starting %s user(s)... %i%%      \r" % (self._numUsers, (i+1.0) / self._numUsers * 100))
                    sys.stdout.flush()
                    self._users[i].start()
                print "Starting %s user(s)... done     " % self._numUsers

                for i in range(self._warmuptime):
                    sys.stdout.write("Warming up... %i   \r" % (self._warmuptime - i))
                    sys.stdout.flush()
                    if self.allUsersFinished():
                        break
                    time.sleep(1)
                print "Warming up... done     "

                sys.stdout.write("Logging results for %i seconds... \r" % self._runtime)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    self._users[i].startLogging()
                for i in range(self._runtime):
                    sys.stdout.write("Logging results for %i seconds... \r" % (self._runtime - i))
                    sys.stdout.flush()
                    if self.allUsersFinished():
                        break
                    time.sleep(1)
                #time.sleep(self._runtime)
                for i in range(self._numUsers):
                    self._users[i].stopLogging()
                print "Logging results for %i seconds... done" % self._runtime

                sys.stdout.write("Stopping %s user(s)...\r" % self._numUsers)
                sys.stdout.flush()
                for i in range(self._numUsers):
                    self._users[i].stop()
                print "users stopped"
                time.sleep(2)
                for i in range(self._numUsers):
                    sys.stdout.write("Stopping %s user(s)... %i%%      \r" % (self._numUsers, (i+1.0) / self._numUsers * 100))
                    sys.stdout.flush()
                    self._users[i].join()
                print "Stopping %s user(s)... done     " % self._numUsers
        if self._vtune is not None:
            subprocess.check_output("amplxe-cl -command stop", cwd=self._vtune, shell=True)
        self.benchBeforeStop()
        self._stopServer()


        if self._with_profiler is not None:
            print "---\n"
            self._profiler.end()

        print "all set"

        if self._remote:
            os.chdir(self._olddir)

        self.benchAfter()
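
The shutdown path in _stopServer above follows a common escalation ladder: terminate(), wait briefly, then kill(). A self-contained sketch of that core pattern as a reusable helper, standard library only:

import subprocess
import time

def stop_process(proc, grace=2.0):
    """Stop a subprocess.Popen gracefully, escalating only if needed."""
    proc.terminate()                    # polite: SIGTERM
    deadline = time.time() + grace
    while time.time() < deadline:
        if proc.poll() is not None:     # exited on its own
            return proc.returncode
        time.sleep(0.1)
    proc.kill()                         # forceful: SIGKILL
    return proc.wait()

# usage sketch: rc = stop_process(subprocess.Popen(["sleep", "60"]))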
Example #43
class TestProfilerLongStock(TestSetUpDB):
    def create_opinion(self, date, position_set, direction, decision="HOLD"):
        """
        Create position opinion
        :param date: str or datetime
        :param direction: str
        :param decision: str
        :return: PositionOpinion
        """
        position_opinion = PositionOpinion()

        position_opinion.position_set = position_set
        position_opinion.date = date
        position_opinion.direction = direction
        position_opinion.decision = decision
        position_opinion.save()

        return position_opinion

    def setUp(self):
        TestSetUpDB.setUp(self)

        # set date
        self.date = "2015-04-28"

        # create underlying
        self.underlying = Underlying(symbol="AAPL", company="APPLE INC")

        # create position set
        self.position_set = PositionSet()
        self.position_set.underlying = self.underlying
        self.position_set.name = "EQUITY"
        self.position_set.spread = "LONG_STOCK"
        self.position_set.start_date = datetime.strptime("2015-04-13", "%Y-%m-%d").date()
        self.position_set.stop_date = datetime.strptime("2015-04-28", "%Y-%m-%d").date()
        self.position_set.save()

        # create position opinion
        self.position_opinion = self.create_opinion(
            date=Profiler.move_bday(self.date, 1), position_set=self.position_set, direction="BULL"
        )

        # create profiler now
        self.profiler = Profiler(position_set=self.position_set, date=self.date)

    def test_move_bday(self):
        """
        Test next bday that skip holidays and offdays
        """
        print "using date: %s" % self.date

        for day in [1, -1]:
            print "run next_bday..."
            next_bday = self.profiler.move_bday(self.date, day)
            """:type: datetime"""

            print "get result: %s" % next_bday

            self.assertEqual(type(next_bday), datetime)
            self.assertNotEqual(self.date, next_bday)

            self.assertFalse(is_holiday(next_bday))
            self.assertFalse(is_offdays(next_bday))

            print "." * 60

    def test_create_opinion_button(self):
        """
        Test create opinion button
        """
        print "run create_opinion_button..."
        opinion_button = self.profiler.create_opinion_button()

        print "opinion_button:"
        print opinion_button

        self.assertEqual(type(opinion_button), dict)
        self.assertTrue(opinion_button["saved"])
        self.assertEqual(opinion_button["object"].id, self.position_opinion.id)

        print "\n" + "." * 60 + "\n"
        print "test false..."
        print "\n" + "." * 60 + "\n"

        print "run create_opinion_button..."
        self.profiler = Profiler(self.position_set, date="2015-04-09")
        opinion_button = self.profiler.create_opinion_button()

        self.assertEqual(type(opinion_button), dict)
        self.assertFalse(opinion_button["saved"])

        print "opinion_button:"
        print opinion_button

    def test_create_position_opinions(self):
        """
        Create position opinions for profiler view
        """
        # create position opinion
        self.position_opinion0 = self.create_opinion(
            date="2015-04-08", position_set=self.position_set, direction="BULL"
        )
        self.position_opinion1 = self.create_opinion(
            date="2015-04-09", position_set=self.position_set, direction="BEAR"
        )

        position_opinions = self.profiler.create_position_opinions()

        for position_opinion in position_opinions:
            print "opinion: %s" % position_opinion
            print "bull: %s" % position_opinion.bull
            print "bear: %s" % position_opinion.bear

            print "." * 60

            self.assertEqual(type(position_opinion.bull["count"]), int)
            self.assertGreaterEqual(position_opinion.bull["count"], 0)
            self.assertEqual(type(position_opinion.bull["correct"]), int)
            self.assertGreaterEqual(position_opinion.bull["correct"], 0)
            self.assertEqual(type(position_opinion.bull["correct_pct"]), float)
            self.assertGreaterEqual(position_opinion.bull["correct_pct"], 0)
            self.assertEqual(type(position_opinion.bull["wrong"]), int)
            self.assertGreaterEqual(position_opinion.bull["wrong"], 0)
            self.assertEqual(type(position_opinion.bull["wrong_pct"]), float)
            self.assertGreaterEqual(position_opinion.bull["wrong_pct"], 0)

            self.assertEqual(type(position_opinion.bear["count"]), int)
            self.assertGreaterEqual(position_opinion.bear["count"], 0)
            self.assertEqual(type(position_opinion.bear["correct"]), int)
            self.assertGreaterEqual(position_opinion.bear["correct"], 0)
            self.assertEqual(type(position_opinion.bear["correct_pct"]), float)
            self.assertGreaterEqual(position_opinion.bear["correct_pct"], 0)
            self.assertEqual(type(position_opinion.bear["wrong"]), int)
            self.assertGreaterEqual(position_opinion.bear["wrong"], 0)
            self.assertEqual(type(position_opinion.bear["wrong_pct"]), float)
            self.assertGreaterEqual(position_opinion.bear["wrong_pct"], 0)

    def test_create_position_dates(self):
        """
        Test create position dates
        """
        position_dates = self.profiler.create_position_dates()

        print "position_dates:"
        pprint(position_dates)

        self.assertEqual(type(position_dates), dict)

        self.assertFalse(position_dates["dte"])
        self.assertFalse(position_dates["expire_date"])
        self.assertEqual(position_dates["pass_bdays"], 12)
        self.assertEqual(position_dates["pass_days"], 16)

        self.assertEqual(position_dates["start_date"], datetime.strptime("2015-04-13", "%Y-%m-%d").date())
        self.assertEqual(position_dates["stop_date"], datetime.strptime("2015-04-28", "%Y-%m-%d").date())
Example #44
def test_start_should_create_inital_profile():
  profiler = Profiler()
  expect(profiler.profiles).to.be.empty

  profiler.start('profile id')
  expect('profile id').to.be.within(profiler.profiles)
Example #45
def test_stop_should_return_profiler_instance():
  profiler = Profiler()
  expect(profiler.start('id').stop('id')).to.equal(profiler)
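
Taken together, examples #44 and #45 specify a tiny Profiler contract: start(id) registers a profile under that id, and stop(id) returns the instance so calls can be chained. A minimal sketch that would satisfy both tests (illustrative, not the library's actual code):

import time

class Profiler(object):
    def __init__(self):
        self.profiles = {}

    def start(self, profile_id):
        self.profiles[profile_id] = {"start": time.time()}
        return self                  # allows start(...).stop(...) chaining

    def stop(self, profile_id):
        self.profiles[profile_id]["end"] = time.time()
        return self                  # tests expect the instance back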
Example #46
    def process(self, tweet):
        Profiler.process(self, tweet)
Example #47
    --------
    232792560

    Copyright (c) 2012, T. Zengerink
    See: https://raw.github.com/Mytho/project-euler-python/master/LISENCE
"""
from profiler import Profiler


def can_be_divided_by_list(n, l):
    """See if a number can be divided by all numbers in a list."""
    if n == 0:
        return False
    for i in l:
        if n % i:
            return False
    return True


def smallest_divisible_for_range(start, end):
    """Get the smallest number that is divisible by all numbers in a range."""
    i = 0
    while not can_be_divided_by_list(i, range(end, start, -1)):
        i += end
    return i


if __name__ == "__main__":
    print(smallest_divisible_for_range(1, 20))
    Profiler.report()
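
The answer printed above is just the least common multiple of 1..20. The trial-increment loop works, but the same result falls out directly from gcd; a faster equivalent sketch (Python 3 standard library):

from functools import reduce
from math import gcd

def lcm_range(start, end):
    """Least common multiple of all integers in [start, end]."""
    return reduce(lambda a, b: a * b // gcd(a, b), range(start, end + 1), 1)

print(lcm_range(1, 20))  # -> 232792560, matching the expected output above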
Example #48
from profiler import Profiler
from algorithms import selectionSort

p = Profiler()
p.test(selectionSort, size=15, comp=True, exch=True, trace=True)
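
The call above assumes an algorithms.selectionSort whose comparisons and exchanges the Profiler can count via the comp/exch flags. That module is not shown; a hypothetical sketch of an instrumented selection sort with pluggable counting hooks (the hook names are illustrative, not the real module's interface):

def selection_sort(lst, on_comparison=None, on_exchange=None):
    """In-place selection sort with optional callbacks, so a profiler
    can count comparisons and exchanges."""
    n = len(lst)
    for i in range(n - 1):
        smallest = i
        for j in range(i + 1, n):
            if on_comparison:
                on_comparison()          # one comparison per inner step
            if lst[j] < lst[smallest]:
                smallest = j
        if smallest != i:
            if on_exchange:
                on_exchange()            # one swap per outer pass at most
            lst[i], lst[smallest] = lst[smallest], lst[i]
    return lst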