def image(self, rendererContext, size):
    timer = Timer() if self.__timing else None
    transform = rendererContext.coordinateTransform()
    ext = rendererContext.extent()
    mapToPixel = rendererContext.mapToPixel()
    size = QSize((ext.xMaximum() - ext.xMinimum()) / mapToPixel.mapUnitsPerPixel(),
                 (ext.yMaximum() - ext.yMinimum()) / mapToPixel.mapUnitsPerPixel()) \
        if abs(mapToPixel.mapRotation()) < .01 else size
    if transform:
        ext = transform.transform(ext)
        if transform.destCRS() != self.__destCRS:
            self.__destCRS = transform.destCRS()
            vtx = numpy.array(self.__meshDataProvider.nodeCoord())

            def transf(x):
                p = transform.transform(x[0], x[1])
                return [p.x(), p.y(), x[2]]

            vtx = numpy.apply_along_axis(transf, 1, vtx)
            self.__glMesh.resetCoord(vtx)
    self.__glMesh.setColorPerElement(self.__meshDataProvider.valueAtElement())
    img = self.__glMesh.image(
        self.__meshDataProvider.elementValues()
        if self.__meshDataProvider.valueAtElement()
        else self.__meshDataProvider.nodeValues(),
        size,
        (.5 * (ext.xMinimum() + ext.xMaximum()), .5 * (ext.yMinimum() + ext.yMaximum())),
        (mapToPixel.mapUnitsPerPixel(), mapToPixel.mapUnitsPerPixel()),
        mapToPixel.mapRotation())
    if self.__timing:
        print(timer.reset("render 2D mesh image"))
    return img
def main(occurrence_filepath: str, ocr_text_filepath: str, image_folder: str, analysis: pd.DataFrame):
    # analysis = prep_comparison_data.main(occurrence_filepath, ocr_text_filepath)
    # calculate_changes.main(analysis, 150)
    processors: List[ImageProcessor] = [GCVProcessor(), AWSProcessor()]
    # for idx, row in analysis.iterrows():
    images = [0, 100, 200, 300, 400, 500, 600, 700]
    label_image_save_location = 'test_results\\label_finder-2021_04_26'
    fig_count = 1
    for idx in images:
        row = analysis.iloc[idx, :]
        barcode = row.at['ground_truth', 'barcode']
        image_location = image_folder + os.path.sep + barcode + '.jpg'
        label_searcher = Timer(barcode)
        for i, processor in enumerate(processors):
            processor.load_image_from_file(image_location)
            print(processor.current_image_width)
            print(processor.current_label_width)
            print(processor.current_image_height)
            print(processor.current_label_height)
            label_points = processor.find_label_location()
            analysis.loc[idx, (processor.name, 'label_upper_left')] = '%s,%s' % (label_points[0][0], label_points[0][1])
            analysis.loc[idx, (processor.name, 'label_upper_right')] = '%s,%s' % (label_points[1][0], label_points[1][1])
            analysis.loc[idx, (processor.name, 'label_lower_right')] = '%s,%s' % (label_points[2][0], label_points[2][1])
            analysis.loc[idx, (processor.name, 'label_lower_left')] = '%s,%s' % (label_points[3][0], label_points[3][1])
            plot_words_and_label(fig_count, processor, processor.current_image_height,
                                 processor.current_image_width, processor.get_found_word_locations(),
                                 label_points, label_image_save_location)
            fig_count += 1
        label_searcher.stop()
        print('Image %i / %i processed by both OCRs in %.2f sec.' %
              (idx + 1, analysis.shape[0], label_searcher.duration))
    return analysis
def start_game(self):  # working on tkinter thread
    print(f"PLAYERS: {self.game.players}")
    self.queue = Cq(self.game.players)
    self.timer = Timer()
    self.game.deal_cards()
    self.msg = 'start'
    index = -1
    while True:  # everything that the server does
        self.message_list = [self.game, index]  # [game_obj, index of player]
        print(self.message_list[0].curr_card)
        for i in range(0, len(self.list_of_client_sockets)):
            if (self.message_list[0].curr_card.number == 11):  # for draw 2
                for i in range(0, 2):
                    random_index = random.randrange(
                        0, len(self.message_list[0].available_cards))
                    self.message_list[0].players[(i + 1) % len(
                        self.list_of_client_sockets)].cards.append(
                            self.message_list[0].available_cards[random_index])
                    self.message_list[0].available_cards.remove(
                        self.message_list[0].available_cards[random_index])
            self.message_list[1] = i  # index = i
            self.message_list[0].players[i].is_turn = True
            self.list_of_client_sockets[i].send(pickle.dumps(self.message_list))
            try:
                recieved_pickled_message = self.list_of_client_sockets[i].recv(1024 * 4)
                self.message_list = pickle.loads(recieved_pickled_message)
            except:
                pass
            self.game = self.message_list[0]
        index = -1
def test_timer(self):
    timer = Timer(0.05)
    self.assertFalse(timer.update())
    time.sleep(0.05)
    self.assertTrue(timer.update())
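# The snippets in this file come from different projects, each with its own Timer
# utility, so their APIs differ. As one illustration only (not any of the original
# implementations), here is a minimal interval timer that would satisfy the unit
# test above, assuming Timer(interval_seconds) and a zero-argument update() that
# reports whether the interval has elapsed and then restarts it.
import time


class Timer:
    def __init__(self, interval):
        self.interval = interval        # interval length in seconds
        self._start = time.monotonic()  # when the current interval began

    def update(self):
        """Return True (and restart the interval) once `interval` seconds have passed."""
        now = time.monotonic()
        if now - self._start >= self.interval:
            self._start = now
            return True
        return False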
def train_gan(X, path, epochs=2, batch_size=64):
    if path[-1] != '/':
        path += '/'
    os.makedirs(path + 'checkpoints', exist_ok=True)
    X_benignware = X[0]
    X_malware = X[1]

    # Creating Models
    input_shape = X_malware.shape[1]
    opt = Adam(learning_rate=0.001)
    generator, discriminator, gan = create_gan(input_shape, path, opt, name='GAN')

    # creating callback for saving checkpoints
    cb = Callbacks(ckpt_path=path + 'checkpoints', models=[generator, discriminator, gan])

    steps_per_epoch = X_malware.shape[0] // batch_size
    history_epochs = {
        'Disc_Loss': [],
        'Disc_Acc': [],
        'Gen_Loss': [],
        'Gen_Acc': [],
        'Batch_Data': []
    }

    chk = input('\n\nStart training GANs (y/N): ')
    if chk.lower() == 'y':
        for epoch in range(1, epochs + 1):
            time = Timer()
            history_batch = {
                'Disc_Loss': [],
                'Disc_Acc': [],
                'Gen_Loss': [],
                'Gen_Acc': [],
                'Batch_Data': []
            }
            bg = BatchGenerator(X_malware, batch_size, batch_shape=None)
            for batch in range(1, steps_per_epoch + 1):
                # start the timer
                time.start()
                # Getting next batch
                batch_malware = bg.get_nextBatch()
                # generating features from Generator
                gen_features = generator.predict(batch_malware)
                # getting samples from benignware for concatenation
                batch_benignware = X_benignware[np.random.randint(
                    0, X_benignware.shape[0], size=batch_size)]
                # converting sparse to dense
                batch_benignware = batch_benignware.todense()

                # Concatenating Benignware and generated features.
                # x = np.concatenate((batch_benignware, gen_features))
                # Generating labels for x
                # disc_y = np.zeros(2*batch_size)
                # disc_y[:batch_size] = 1.0
                # Train Discriminator
                # disc_metric = discriminator.train_on_batch(x, disc_y)

                # Generating labels for x
                disc_y = np.full(batch_size, 0)
                # Train Discriminator
                disc_metric0 = discriminator.train_on_batch(gen_features, disc_y)
                # Generating labels for x
                disc_y = np.full(batch_size, 1)
                # Train Discriminator
                disc_metric1 = discriminator.train_on_batch(batch_benignware, disc_y)
                disc_metric = [(disc_metric0[0] + disc_metric1[0]) / 2,
                               (disc_metric0[1] + disc_metric1[1]) / 2]
                history_batch['Disc_Loss'].append(disc_metric[0])
                history_batch['Disc_Acc'].append(disc_metric[1])

                # Train Generator using GAN model
                y_gen = np.ones(batch_size)
                gen_metric = gan.train_on_batch(batch_malware, y_gen)
                gen_metric = gan.train_on_batch(batch_malware, y_gen)
                gen_metric = gan.train_on_batch(batch_malware, y_gen)
                gen_metric = gan.train_on_batch(batch_malware, y_gen)
                gen_metric = gan.train_on_batch(batch_malware, y_gen)
                history_batch['Gen_Loss'].append(gen_metric[0])
                history_batch['Gen_Acc'].append(gen_metric[1])

                # Printing info of batch
                time_remain, time_taken = time.get_time_hhmmss(steps_per_epoch - batch)
                timers = (time_remain, time_taken)
                history = (history_batch, history_epochs)
                info_out('batch', history, timers, epoch, epochs, batch, steps_per_epoch)

            # computing loss & accuracy over one epoch
            history_epochs['Disc_Loss'].append(sum(history_batch['Disc_Loss']) / steps_per_epoch)
            history_epochs['Disc_Acc'].append(sum(history_batch['Disc_Acc']) / steps_per_epoch)
            history_epochs['Gen_Loss'].append(sum(history_batch['Gen_Loss']) / steps_per_epoch)
            history_epochs['Gen_Acc'].append(sum(history_batch['Gen_Acc']) / steps_per_epoch)
            history_epochs['Batch_Data'].append(history_batch)
            history = (history_batch, history_epochs)
            info_out(which='epoch', history=history, epoch=epoch, total_time=time.get_total_time())
            cb.ckpt_callback(epoch, history_epochs)
    elif chk.lower() == 'n':
        raise SystemExit

    # Writing history to disk
    dump(history_epochs, open(path + 'checkpoints/history.obj', 'wb'))
    return history_epochs
class StrategyTuner():

    def __init__(self, raw_data: pd.DataFrame, strategy_creator, fields_to_tune: tuple, need_recompute_ti=False):
        self.data = raw_data
        self.strategy_creator = strategy_creator
        self.fields_to_tune = fields_to_tune
        self.need_recompute_ti = need_recompute_ti
        self.strategy = None
        self.backtest = None
        self.report = None
        self.timer = Timer()
        self.initialize()

    def initialize(self):
        self.update_strategy_param()
        self.initialize_backtest()
        self.report = StrategyTunerReport(self.strategy, self.strategy_creator)

    def initialize_backtest(self):
        self.backtest = Backtest(self.data, [self.strategy])
        self.backtest.initialize()
        self.backtest.data_preprocess()
        self.backtest.compute_technical_indicator()

    def simulate(self, param_values: list):
        self.update_strategy_param(param_values)
        param_config = self.strategy_creator.__dict__
        if self.report.param_already_exists(param_config):
            logging.warn(
                f'[StrategyTuner] Skipping this param because it is already simulated: {param_config}')
            return
        self.backtest.strategies = [self.strategy]
        self.backtest.initialize()
        if self.need_recompute_ti:
            self.initialize_backtest()
        self.backtest.back_test()
        self.backtest.gen_report(only='general_report')
        self.record_simulation()

    def record_simulation(self):
        self.report.write(
            backtest_result=dict(self.backtest.report.general_report.df.iloc[0]),
            param_config=self.strategy_creator.__dict__)

    def update_strategy_param(self, param_values=None):
        if not param_values:
            for field in self.fields_to_tune:
                self.strategy_creator.__dict__[field.name] = field.low_bound
        else:
            for i in range(len(self.fields_to_tune)):
                field = self.fields_to_tune[i]
                self.strategy_creator.__dict__[field.name] = field.values[param_values[i]]  # TO DO: param_values list
        self.strategy = self.strategy_creator.create()

    # recursion
    def do_step(self, counters, lengths, level):
        if level == len(counters):
            self.simulate(param_values=counters)
            msg = self.write_step_msg(counters)
            self.timer.time(msg)
        else:
            counters[level] = 0
            while True:
                if counters[level] >= lengths[level]:
                    break
                # do something
                self.do_step(counters, lengths, level + 1)
                counters[level] += 1

    def write_step_msg(self, param_values: list):
        d = dict()
        for i in range(len(self.fields_to_tune)):
            field = self.fields_to_tune[i]
            d[field.name] = field.values[param_values[i]]
        return str(d)

    def run(self):
        counters = [0] * len(self.fields_to_tune)
        lengths = [len(field.values) for field in self.fields_to_tune]
        self.do_step(counters, lengths, 0)
def asift_main(image1: str, image2: str, detector_name: str = "sift-flann"):
    """
    Main function of ASIFT Python implementation.

    :param image1: Path for first image
    :param image2: Path for second image
    :param detector_name: (sift|surf|orb|akaze|brisk)[-flann] Detector type to use, default is SIFT.
                          Add '-flann' to use FLANN matching.
    :return: None (Will return coordinate pairs in future)
    """
    # It seems that FLANN has performance issues, may be replaced by CUDA in future
    # Read images
    ori_img1 = cv2.imread(image1, cv2.IMREAD_GRAYSCALE)
    ori_img2 = cv2.imread(image2, cv2.IMREAD_GRAYSCALE)

    # Initialize feature detector and keypoint matcher
    detector, matcher = init_feature(detector_name)

    # Exit when reading empty image
    if ori_img1 is None or ori_img2 is None:
        print("Failed to load images")
        sys.exit(1)

    # Exit when encountering unknown detector parameter
    if detector is None:
        print(f"Unknown detector: {detector_name}")
        sys.exit(1)

    ratio_1 = 1
    ratio_2 = 1
    if ori_img1.shape[0] > MAX_SIZE or ori_img1.shape[1] > MAX_SIZE:
        ratio_1 = MAX_SIZE / ori_img1.shape[1]
        print("Large input detected, image 1 will be resized")
        img1 = image_resize(ori_img1, ratio_1)
    else:
        img1 = ori_img1

    if ori_img2.shape[0] > MAX_SIZE or ori_img2.shape[1] > MAX_SIZE:
        ratio_2 = MAX_SIZE / ori_img2.shape[1]
        print("Large input detected, image 2 will be resized")
        img2 = image_resize(ori_img2, ratio_2)
    else:
        img2 = ori_img2

    print(f"Using {detector_name.upper()} detector...")

    # Profile time consumption of keypoints extraction
    with Timer(f"Extracting {detector_name.upper()} keypoints..."):
        pool = ThreadPool(processes=cv2.getNumberOfCPUs())
        kp1, desc1 = affine_detect(detector, img1, pool=pool)
        kp2, desc2 = affine_detect(detector, img2, pool=pool)
        print(f"img1 - {len(kp1)} features, img2 - {len(kp2)} features")

    # Profile time consumption of keypoints matching
    with Timer('Matching...'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)

    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)

    if len(p1) >= 4:
        # TODO: The effect of resizing on the homography matrix needs to be investigated.
        # TODO: Investigate function consistency when images aren't resized.
        for index in range(len(p1)):
            pt = p1[index]
            p1[index] = pt / ratio_1
        for index in range(len(p2)):
            pt = p2[index]
            p2[index] = pt / ratio_2
        for index in range(len(kp_pairs)):
            element = kp_pairs[index]
            kp1, kp2 = element
            new_kp1 = cv2.KeyPoint(kp1.pt[0] / ratio_1, kp1.pt[1] / ratio_1, kp1.size)
            new_kp2 = cv2.KeyPoint(kp2.pt[0] / ratio_2, kp2.pt[1] / ratio_2, kp2.size)
            kp_pairs[index] = (new_kp1, new_kp2)

        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print(f"{np.sum(status)} / {len(status)} inliers/matched")
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print(f"{len(p1)} matches found, not enough for homography estimation")

    # kp_pairs: list[(cv2.KeyPoint, cv2.KeyPoint)]
    draw_match("ASIFT Match Result", ori_img1, ori_img2, kp_pairs, None, H)  # Visualize result
    cv2.waitKey()
    log_keypoints(kp_pairs, "sample/keypoints.txt")  # Save keypoint pairs for further inspection
    print('Done')
def main():
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s:%(process)d:%(levelname)s:%(name)s:%(message)s')
    parser = argparse.ArgumentParser(description='GMRT CNN Training')
    parser.add_argument('--batch-size', type=int, default=20000, metavar='N',
                        help='input batch size for training (default: 20000)')
    parser.add_argument('--epochs', type=int, default=5, metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--learning-rate', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--keep-probability', type=float, default=0.6, metavar='K',
                        help='Dropout keep probability (default: 0.6)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--num-processes', type=int, default=4, metavar='N',
                        help='how many training processes to use (default: 4)')
    parser.add_argument('--use-gpu', action='store_true', default=False,
                        help='use the GPU if it is available')
    parser.add_argument('--data-path', default='./data',
                        help='the path to the data file')
    parser.add_argument('--data-file', default='data.h5',
                        help='the name of the data file')
    parser.add_argument('--sequence-length', type=int, default=10,
                        help='how many elements in a sequence')
    parser.add_argument('--validation-percentage', type=int, default=10,
                        help='amount of data used for validation')
    parser.add_argument('--training-percentage', type=int, default=80,
                        help='amount of data used for training')
    parser.add_argument('--seed', type=int, default=None, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--learning-rate-decay', type=float, default=0.8, metavar='LRD',
                        help='the initial learning rate decay rate')
    parser.add_argument('--start-learning-rate-decay', type=int, default=5,
                        help='the epoch to start applying the LRD')
    parser.add_argument('--short_run', type=int, default=None,
                        help='use a short run of the test data')
    parser.add_argument('--save', type=str, default=None,
                        help='path to save the final model')

    kwargs = vars(parser.parse_args())
    LOGGER.debug(kwargs)

    # If they have specified a seed use it, otherwise get a random one
    if kwargs['seed'] is not None:
        np.random.seed(kwargs['seed'])
    else:
        np.random.seed()

    if kwargs['use_gpu'] and torch.cuda.is_available():
        LOGGER.info('Using cuda devices: {}'.format(torch.cuda.device_count()))
        kwargs['cuda_device_count'] = torch.cuda.device_count()
        kwargs['using_gpu'] = True
    else:
        LOGGER.info('Using CPU')
        kwargs['cuda_device_count'] = 0
        kwargs['using_gpu'] = False

    # Do this first so all the data is built before we go parallel and get race conditions
    with Timer('Checking/Building data file'):
        build_data(**kwargs)

    rfi_data = RfiData(**kwargs)

    if kwargs['using_gpu']:
        # The DataParallel will distribute the model to all the available GPUs
        # model = nn.DataParallel(GmrtCNN(kwargs['keep_probability'])).cuda()
        model = nn.DataParallel(
            GmrtLinear(kwargs['keep_probability'], kwargs['sequence_length'])).cuda()

        # Train
        train(model, rfi_data, **kwargs)
    else:
        # This uses the HOGWILD! approach to lock free SGD
        # model = GmrtCNN(kwargs['keep_probability'])
        model = GmrtLinear(kwargs['keep_probability'], kwargs['sequence_length'])
        model.share_memory()  # gradients are allocated lazily, so they are not shared here

        processes = []
        for rank in range(kwargs['num_processes']):
            p = mp.Process(target=train, args=(model, rfi_data, rank), kwargs=kwargs)
            p.start()
            processes.append(p)
        for p in processes:
            p.join()

    with Timer('Reading final test data'):
        test_loader = data.DataLoader(
            rfi_data.get_rfi_dataset('test', short_run_size=kwargs['short_run']),
            batch_size=kwargs['batch_size'],
            num_workers=1,
            pin_memory=kwargs['using_gpu'],
        )

    with Timer('Final test'):
        test_epoch(model, test_loader, kwargs['log_interval'])

    if kwargs['save'] is not None:
        with Timer('Saving model'):
            with open(kwargs['save'], 'wb') as save_file:
                torch.save(model.state_dict(), save_file)
def train_GAN(self, X_train, epochs, batch_size, batch_shape, name, gan_summary=False, tensorboard=True):
    """
    Parameters
    ----------
    X_train : TYPE
        DESCRIPTION.
    epochs : TYPE
        DESCRIPTION.
    batch_size : TYPE
        DESCRIPTION.
    batch_shape : TYPE
        DESCRIPTION.
    name : TYPE
        DESCRIPTION.
    gan_summary : TYPE, optional
        DESCRIPTION. The default is False.

    Returns
    -------
    TYPE
        DESCRIPTION.
    """
    self.input_shape = (batch_shape[1], batch_shape[2])
    generator, discriminator, gan_model = self.get_gan_model(name)

    if tensorboard:
        # Get the session's graph
        # graph = K.get_session().graphs
        self.tensorboard_callback(models=[generator, discriminator, gan_model])

    if gan_summary:
        gan_model.summary()
        print('Note: In the GAN (combined model) the Discriminator parameters are set to '
              'non-trainable, because we do not train the Discriminator while training the Generator!')

    steps_per_epoch = len(X_train) // batch_size

    chk = input('\n\nStart training y/N: ')
    if chk.lower() == 'y':
        for epoch in range(1, epochs + 1):
            # setting up timer class
            time = Timer()
            bg = BatchGenerator(X_train, batch_size=batch_size)
            for batch in range(1, steps_per_epoch):
                # start the timer
                time.start()
                # X_reshaped is the data used to get the output from the generator model. X is the data in
                # original dimensions, e.g. [batches, features], while X_reshaped is the data for the LSTM
                # layer (LSTM layers take 3D data), so X_reshaped has dimensions [batches, timesteps, features],
                # whereas x_t1 is the data at time t+1, i.e. the next batch.
                X, X_reshaped, x_t1 = bg.get_nextBatch(batch_shape)
                # Getting the data for discriminator training.
                X_disc, Y_disc, X_fake = bg.get_disc_gan_data(generator, X, X_reshaped, x_t1)

                # train discriminator
                metrics = discriminator.train_on_batch(X_disc, Y_disc)
                self.history_batch['Disc_Loss'].append(metrics[0])
                self.history_batch['Disc_Acc'].append(metrics[1])

                # train generator
                self.train_generator(generator, gan_model, X_reshaped, x_t1, X_fake)

                # Getting total time taken by a batch
                self.time_remain, self.time_taken = time.get_time_hhmmss(steps_per_epoch - batch)
                self.info_out('batch', epoch, epochs, batch, steps_per_epoch)

            # computing loss & accuracy over one epoch
            self.history_epoch['Disc_Loss'].append(sum(self.history_batch['Disc_Loss']) / steps_per_epoch)
            self.history_epoch['Disc_Acc'].append(sum(self.history_batch['Disc_Acc']) / steps_per_epoch)
            self.history_epoch['Gen_Loss'].append(sum(self.history_batch['Gen_Loss']) / steps_per_epoch)
            self.history_epoch['Gen_Acc'].append(sum(self.history_batch['Gen_Acc']) / steps_per_epoch)
            self.history_epoch['Batch_Data'].append(self.history_batch)
            self.info_out(which='epoch', epoch=epoch, total_time=time.get_total_time())
            self.ckpt_callback(epoch, [generator, discriminator, gan_model])
    elif chk.lower() == 'n':
        raise SystemExit

    return self.history_epoch
def main():
    # TODO Handle options for input/output
    # TODO Add flags to determine what gets read
    sw = Timer()

    print("Flattening business data")
    sw.start()
    business_data = flatten_business_data('data/yelp_academic_dataset_business.json')
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Outputting business data")
    sw.start()
    business_data.to_csv("processed_data/business_data.csv", index=False)
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Flattening check in data")
    sw.start()
    checkin_data = flatten_checkin_data('data/yelp_academic_dataset_checkin.json')
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Outputting check in data")
    sw.start()
    checkin_data.to_csv("processed_data/checkin_data.csv", index=False)
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Flattening tip data")
    sw.start()
    tip_data = flatten_tip_data('data/yelp_academic_dataset_tip.json')
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Outputting tip data")
    sw.start()
    tip_data.to_csv("processed_data/tip_data.csv", index=False)
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Flattening review data")
    sw.start()
    review_data = flatten_review_data('data/yelp_academic_dataset_review.json')
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()

    print("Outputting review data")
    sw.start()
    review_data.to_csv("processed_data/review_data.csv", index=False)
    sw.stop()
    print("Time: %f" % sw.elapsed)
    sw.reset()
import utilities
from utilities import Vector
from utilities import Timer

playerTank = None
score = 0
lives = 3
tankSpawns = []
base = None
liveEnemyTanks = []
currentLevel = 1
powerupSpawner = PowerupSpawner()
powerupTimer = Timer(40000)
overlayText = None
overlayHideTime = None


def startNewGame():
    global lives, score
    lives = 5
    score = 0
    recreatePlayerTank()
    loadLevel(1)


def loadNextLevel():
    loadLevel(currentLevel + 1)
class AiTankController(TankController):

    def __init__(self, entity):
        self.entity = entity
        self.fireTimer = Timer(500)
        self.lastMovementTime = pygame.time.get_ticks()
        self.pendingPathSearch = None
        self.plannedPath = None
        self.pathPlanTime = 0
        self.searchGridFunction = SearchGridGenerator.getSearchSpaceCellValueForTile
        self.stepLength = 50

    def update(self, time, timePassed):
        if self.fireTimer.update(time):
            self.fire(time)
        if self.isPathPlanningPending() and self.isPathPlanningCompleted():
            if self.pendingPathSearch.pathFound():
                self.plannedPath = PlannedPath(self.pendingPathSearch.getPath())
                self.resetLastMovementTime(pygame.time.get_ticks())
            self.pendingPathSearch = None

    def render(self, screen):
        pass
        # self.renderPlannedPath(screen)

    def renderPlannedPath(self, screen):
        if self.plannedPath != None:
            image = images.get('projectile')
            for step in self.plannedPath.path:
                screen.blit(image, ((step[0] * 8) + 8, step[1] * 8))

    def pathRecalculationNeeded(self, time):
        return (not self.hasPath() and not self.isPathPlanningPending()) or self.isPlannedPathExpired(time)

    def isPlannedPathExpired(self, time):
        return time - self.pathPlanTime > 5000

    def canMoveAlongPath(self):
        return self.hasPath() and not self.plannedPath.targetReached()

    def moveAlongPath(self, time):
        movementSteps = int((time - self.lastMovementTime) / self.stepLength)
        if movementSteps > 0:
            for _ in range(movementSteps):
                self.stepTowardsTarget()
                if self.plannedPath.targetReached():
                    break
            self.resetLastMovementTime(time)

    def resetLastMovementTime(self, time):
        self.lastMovementTime = time

    def stepTowardsTarget(self):
        targetStep = self.toWorldSpaceTuple(self.plannedPath.getTargetStep())
        self.moveTowardsLocation(targetStep)
        self.plannedPath.moveToNextStepIfCurrentStepIsReached(self.entity.getLocation().toIntTuple())

    def fire(self, time):
        self.entity.fire(time)
        self.pickRandomFireTime()

    def pickRandomFireTime(self):
        self.fireTimer.setInterval(random.randint(400, 600))

    def plotPathToLocation(self, targetLocation):
        searchGrid = SearchGridGenerator.generateSearchGridFromPlayfield(self.searchGridFunction)
        start = self.toSearchSpaceCoordinateTuple(self.entity.getLocation())
        end = self.toSearchSpaceCoordinateTuple(targetLocation)
        self.pendingPathSearch = PathFindingTask(searchGrid, start, end)
        PathfinderWorker.queueTask(self.pendingPathSearch)
        self.pathPlanTime = pygame.time.get_ticks()

    def isPathPlanningPending(self):
        return self.pendingPathSearch != None

    def isPathPlanningCompleted(self):
        return self.pendingPathSearch != None and self.pendingPathSearch.isCompleted()

    def hasPath(self):
        return self.plannedPath != None

    def moveTowardsLocation(self, targetLocation):
        location = self.entity.location
        if location.x < targetLocation[0]:
            self.entity.moveSingleStep(utilities.vectorRight)
        elif location.x > targetLocation[0]:
            self.entity.moveSingleStep(utilities.vectorLeft)
        elif location.y < targetLocation[1]:
            self.entity.moveSingleStep(utilities.vectorDown)
        elif location.y > targetLocation[1]:
            self.entity.moveSingleStep(utilities.vectorUp)

    def toSearchSpaceCoordinateTuple(self, coordinates):
        return (int(coordinates.x / 8), int(coordinates.y / 8))

    def toWorldSpaceTuple(self, coordinates):
        return (int(coordinates[0] * 8), int(coordinates[1] * 8))
class GUI():

    def __init__(self, screen_rect, bg_img):
        # Setup Screen and background image
        self.screen = pygame.display.set_mode((screen_rect.w, screen_rect.h))
        self.screen_rect = screen_rect
        self.bg_img = bg_img.convert()

        # Init starfield (layer above background, slowly scrolls on loop)
        self.star_field = objects.object_types["starField"](
            speed=-0.3,
            surface=self.screen)

        # Init player ship, add it to shipGroup
        self.player_ship = objects.object_types["ship"](position=(50, 50), activate=True)
        ship.Ship.shipGroup.add(self.player_ship)

        # Init the alien infested asteroid, add to infestedGroup
        self.infested_asteroid = objects.object_types["infested"](
            position=(self.screen_rect.w - 150, 200))
        infested.Infested.infestedGroup.add(self.infested_asteroid)

        # Init space station, add to spaceStation group
        self.space_station = objects.object_types["spaceStation"](
            position=(75, self.screen_rect.h / 2))
        spaceStation.SpaceStation.spaceStationGroup.add(self.space_station)

        # Init scorekeeping, add to score group
        self.score = objects.object_types["score"](
            position=(self.screen_rect.w, 0),
            time=time.time(),
            spaceStation=self.space_station)
        score.Score.scoreGroup.add(self.score)

        # For the graph section
        self.paths = None
        self.graph_timer = Timer(150 / 1000, self.graph_update)

        # TODO: remove this demo feature
        self.OPTIONS = 0

    def p1_button(self, e, down):
        '''
        Handles user input, allowing player_ship to be piloted

        Args:
            event e, a pygame event
            down, a boolean representing key event type (down==True)
        '''
        if e == K_RIGHT:
            self.player_ship.k_right = down * self.player_ship.turn_speed
        elif e == K_LEFT:
            self.player_ship.k_left = down * self.player_ship.turn_speed
        elif e == K_UP:
            self.player_ship.k_up = down * self.player_ship.acceleration
        elif e == K_DOWN:
            self.player_ship.k_down = down * self.player_ship.acceleration
        elif e == K_SPACE and down:
            self.player_ship.fire_missile()
        elif e == K_TAB and down:
            self.OPTIONS = (self.OPTIONS + 1) % 4
            print(self.OPTIONS)

    def asteroid_spawn(self):
        '''
        Generates an asteroid offscreen right which will travel towards the
        spacestation. Consecutive asteroids will have semi-uniform motion,
        defined by the speed and direction. Asteroid spawn positions will be
        divided into rough 'sections' to help create a uniform asteroid field.
        '''
        # Divide the screen into rough sections (1.25 asteroid height)
        spacing_height = asteroid.Asteroid.sprite.get_rect().h * 1.25
        # Determine how many integer sections fit on screen
        spacing_number = int(self.screen_rect.h / spacing_height)

        # The second term positions the asteroid offscreen by some amount
        x = self.screen_rect.w + random.randrange(10, 50)
        # First term offsets the spacing so asteroids do not spawn
        # offscreen in the vertical axis. Second term determines
        # which 'section' to spawn asteroid in.
        y = 0.5 * spacing_height + \
            spacing_height * random.randint(0, spacing_number)

        # Field movement properties
        speed = random.triangular(0.5, 3, 2)
        direction = random.uniform(170, 190)

        # Init asteroid object, add it to asteroid group
        new_asteroid = objects.object_types["asteroid"](position=(x, y),
                                                        speed=speed,
                                                        direction=direction)
        asteroid.Asteroid.asteroidGroup.add(new_asteroid)

    def alien_spawn(self, QTY):
        '''
        Adds an integer QTY of potential aliens to the infested asteroid's
        'aliens' attribute. This quantity will dictate the amount of aliens
        which can spawn from the alien_hop method. (I call it 'spawn pool')
        '''
        self.infested_asteroid.aliens += QTY

    def alien_hop(self):
        '''
        Handles creation of new aliens, movement of existing aliens, and
        drawing of teleport beams for successful hops.
        '''
        # Only create aliens if there is a path
        if not self.paths is None:
            # Send an alien along each path
            for p in self.paths:
                if self.infested_asteroid.aliens != 0:
                    new_alien = objects.object_types["alien"](
                        asteroid=self.infested_asteroid,  # Start at source
                        path=p[1:])  # No need to include source in path
                    alien.Alien.alienGroup.add(new_alien)
                    self.infested_asteroid.aliens -= 1  # Decrement 'spawn pool'
            self.paths = None

        # Only move if the spacestation is alive
        if self.space_station.alive():
            target = self.space_station
            for al in alien.Alien.alienGroup:
                hop = None
                # Alien movement
                hop = al.path_hop(target)
                # Optional alien path drawing
                if self.OPTIONS == 1 and not al.path is None:
                    self.alien_path_beams(al)
                # If hop successful, two nodes were returned, draw the beam
                if not hop is None:
                    beam.Beam.beamGroup.add(
                        objects.object_types["beam"](
                            obj1=hop[0],
                            obj2=hop[1],
                            color=(128, 255, 0),
                            surface=self.screen))

    def offscreen(self):
        '''
        Deactivates objects which go offscreen
        '''
        # Since we don't want asteroids deleting before offscreen, add padding
        padding = 50
        # Asteroids
        for o in asteroid.Asteroid.asteroidGroup:
            if o.position[0] < -padding or \
               o.position[0] > self.screen_rect.w + padding or \
               o.position[1] < -10 or \
               o.position[1] > self.screen_rect.h + 10:
                o.deactivate()
        # Missiles
        for o in missile.Missile.missileGroup:
            if o.position[0] < -padding or \
               o.position[0] > self.screen_rect.w + padding or \
               o.position[1] < -padding or \
               o.position[1] > self.screen_rect.h + padding:
                o.deactivate()
        # Aliens
        for o in alien.Alien.alienGroup:
            if o.position[0] < -padding or \
               o.position[0] > self.screen_rect.w + padding or \
               o.position[1] < -padding or \
               o.position[1] > self.screen_rect.h + padding:
                o.deactivate()

    def update(self):
        '''
        Called by game.py, calls the update methods of various
        groups/objects. Also handles object collisions.
        '''
        self.star_field.update()
        self.score.update(time.time())
        collision.ast_mis(self.screen)
        collision.ast_ast()
        # Optional ship collision with asteroids
        if self.OPTIONS == 2:
            collision.ship_ast(self.screen)
        asteroid.Asteroid.asteroidGroup.update()
        ship.Ship.shipGroup.update()
        missile.Missile.missileGroup.update()
        self.graph_timer.update()
        alien.Alien.alienGroup.update()

    def draw(self):
        '''
        Draws the background elements and game objects.
        '''
        # Re-draw entire background
        self.screen.blit(self.bg_img, (0, 0))
        self.star_field.draw(self.screen)
        self.score.draw(self.screen)

        # Draw the objects
        infested.Infested.infestedGroup.draw(self.screen)
        asteroid.Asteroid.asteroidGroup.draw(self.screen)
        ship.Ship.shipGroup.draw(self.screen)
        missile.Missile.missileGroup.draw(self.screen)
        spaceStation.SpaceStation.spaceStationGroup.draw(self.screen)
        alien.Alien.alienGroup.draw(self.screen)

        # Draw special effects
        # Particles draw simple circles on update
        particle.Particle.particleGroup.update()
        # Beams draw simple lines on update
        beam.Beam.beamGroup.update()

        # Update the screen
        pygame.display.flip()

    def graph_update(self):
        '''
        Updates the graph (flow network), modifies the GUI paths attribute to
        include any paths returned from the max flow algorithm. Every time this
        function is called, the graph is created from scratch, and max flow is
        called on this new flow network.
        '''
        # Reset paths
        self.paths = None

        # Since the graph creates new source/sink nodes, store these along
        # with the graph object
        g, s, t = graph.gen_flow_network(
            asteroid.Asteroid.asteroidGroup.sprites(),  # Nodes
            self.infested_asteroid,                     # Source s
            self.space_station,                         # Sink t
            alien.Alien.radius)                         # The max teleport radius

        # Optional graph edge drawing
        if self.OPTIONS == 3:
            self.graph_beams(g)

        # Run max flow on the newly created graph object
        flow = graph.max_flow(g, s, t)
        if flow > 0:
            # There is at least 1 valid flow path
            # Reconstruct flows to yield a list of path lists.
            self.paths = graph.reconstruct_flows(g, s, t)

    def graph_beams(self, g):
        '''
        Simply for demoing, draws a line for all edges in the graph object
        created in graph_update using the beam class.
        '''
        for e in g.edges():
            beam.Beam.beamGroup.add(
                objects.object_types["beam"](
                    health=1,
                    obj1=e[0],
                    obj2=e[1],
                    color=(255, 255, 0),
                    surface=self.screen))

    def alien_path_beams(self, alien):
        '''
        Simply for demoing, draws a line for all edges in the alien's path.
        Green means still viable, red means out of range.
        '''
        curr = alien.asteroid
        for succ in alien.path[1:]:
            if euclidD(succ.position, curr.position) > alien.radius:
                color = (255, 153, 153)
            else:
                color = (102, 155, 102)
            beam.Beam.beamGroup.add(
                objects.object_types["beam"](
                    health=40,
                    obj1=curr,
                    obj2=succ,
                    color=color,
                    surface=self.screen))
            curr = succ
class Tank(entities.Entity, entities.ProjectileCollider, entities.Blocking):

    def __init__(self, location, graphics, heading=Vector(1, 0)):
        super().__init__()
        self.graphics = graphics
        self.heading = Vector(0, -1)
        self.direction = Direction.NORTH
        self.moving = False
        self.type = type
        self.maxHitPoints = 10
        self.hitpoints = self.maxHitPoints
        self.movementSpeed = 1
        self.scorePoints = 0
        self.shielded = False
        self.shieldEndTime = 0
        self.weapon = Weapon(self, level=1)
        self.controller = None
        self.controllerTimer = Timer(50)
        tileBlockedFunction = lambda tile: not tile is None and tile.blocksMovement
        self.movementHandler = MovementHandler(self, tileBlockedFunction)
        self.setLocation(location)
        self.setHeading(heading)
        self.lastHitTime = None
        self.lastHitVector = Vector(0, 0)
        self.destroyCallback = None
        self.destroyed = False

    def setScorePoints(self, points):
        self.scorePoints = points

    def getScorePoints(self):
        return self.scorePoints

    def setMaxHitpoints(self, hitpoints):
        self.maxHitPoints = hitpoints
        self.repair()

    def repair(self):
        self.hitpoints = self.maxHitPoints

    def setController(self, controller):
        self.controller = controller
        self.playerControlled = isinstance(controller, tankcontroller.PlayerTankController)

    def getController(self):
        return self.controller

    def isPlayerControlled(self):
        return self.playerControlled

    def update(self, time, timePassed):
        if self.controllerTimer.update(time):
            self.controller.update(time, timePassed)
        if self.moving:
            movementVector = self.heading.multiplyScalar(
                self.movementSpeed * timePassed * 0.05).round()
            self.movementHandler.moveEntity(movementVector)
        self.checkIfShieldIsDone(time)
        self.moving = False

    def render(self, screen, offset, time):
        extraOffset = Vector(0, 0)
        if not self.lastHitTime == None and time - self.lastHitTime > 50:
            extraOffset = self.lastHitVector.multiplyScalar(-1).toUnit()
        drawOffset = Vector(offset[0], offset[1])
        self.graphics.render(screen, drawOffset.add(self.location).add(extraOffset), self.direction)
        self.controller.render(screen)

    def moveSingleStep(self, direction):
        self.setHeading(direction)
        self.movementHandler.moveEntity(direction.toUnit())

    def moveInDirection(self, direction):
        self.setHeading(direction)
        self.moving = True

    def canMoveInDirection(self, direction):
        return self.movementHandler.canMove(direction)

    def fire(self, time):
        if self.weapon.canFire(time):
            location = self.getProjectileFireLocation()
            self.weapon.fire(location, self.heading, time)

    def getProjectileFireLocation(self):
        halfProjectileSize = Vector(2, 2)
        location = self.getCenterLocation()
        return location.subtract(halfProjectileSize)
        # halfProjectileSize = Vector(2, 2)
        # location = self.getCenterLocation()
        # location = location.subtract(halfProjectileSize)
        # location = location.add(self.heading.toUnit().multiplyScalar(self.size.y / 2))
        # return location

    def hitByProjectile(self, projectile, time):
        if self.shielded:
            return
        self.lastHitTime = time
        self.lastHitVector = projectile.directionVector
        self.hitpoints -= projectile.power
        if self.hitpoints <= 0:
            self.destroy()

    def setImage(self, image):
        self.image = image
        self.setSize(Vector(self.image.get_width(), self.image.get_height()))

    def getHeading(self):
        return self.heading

    def setHeading(self, newHeading):
        self.heading = newHeading
        self.direction = self.getDirectionFromVector(self.heading)
        self.setGraphics(self.direction)

    def setGraphics(self, direction):
        self.setImage(self.graphics.baseImages[direction])
        self.turretImage = self.graphics.turretImages[direction]
        self.turretOffset = self.graphics.turretOffsets[direction]

    def getWeapon(self):
        return self.weapon

    def setWeapon(self, newWeapon):
        self.weapon = newWeapon

    def enableShield(self, duration):
        self.shielded = True
        self.shieldEndTime = pygame.time.get_ticks() + duration
        print(f'Shield enabled for {duration} seconds')

    def checkIfShieldIsDone(self, time):
        if self.shielded and time >= self.shieldEndTime:
            self.shielded = False
            print('Shield has run out')

    def setDestroyCallback(self, callback):
        self.destroyCallback = callback

    def fireDestroyCallback(self):
        if self.destroyCallback != None:
            self.destroyCallback(self)

    def getHitpoints(self):
        return self.hitpoints

    def destroy(self):
        self.destroyed = True
        self.createExplosion()
        self.fireDestroyCallback()
        self.markDisposable()

    def createExplosion(self):
        image = images.get('explosion')
        entities.manager.add(
            entities.effect.Effect(image, self.getCenterLocation(), 300))

    def isDestroyed(self):
        return self.destroyed
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from utilities import Timer, MetaData, ResultsWriter

# file properties
# -----------------------------------------------------
filePath = '../data/results.txt'

metadata = MetaData()
dataType = metadata.getResultColsDataType()

timer = Timer()
startTime = timer.getTime()
print('Start Time : ', timer.getTime())  # Get the start time for tracking purposes

print('------------------------------------------------------------')
print('Reading files ... ')
print('------------------------------------------------------------')
data = np.loadtxt(filePath, delimiter=',', skiprows=1, dtype=dataType)
df = pd.DataFrame(data)

# Separating the subject and activity
activity = df.ix[:, -1] % 100
activity.name = 'predicted_activity'
subject = (df.ix[:, -1] - activity) / 100
subject.name = 'predicted_subj'
# The number, 197, is called a circular prime because all rotations of the digits:
# 197, 971, and 719, are themselves prime.
# There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
# How many circular primes are there below one million?
# 3.47s, too slow
from math import sqrt
from utilities import e_sieve, Timer

timer = Timer()
primes_under_mill = e_sieve(999999)  # Uses sieve of Eratosthenes, check utilities.py for implementation


def is_not_prime(num):
    for i in range(2, int(sqrt(num)) + 1):
        if num % i == 0:  # If num is evenly divisible by any number 2-sqrt(num) it cannot be prime
            return True
    return False


def is_circular_prime(number):
    number = str(number)
    rotations = {number[x:] + number[:x] for x in range(len(number))}  # Generates rotations
    for p in rotations:
        if is_not_prime(int(p)):  # If any rotation is not prime, number is not a circular prime
            return False
    return True


circular_primes = set()
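# (Assumed completion; the original snippet is cut off above.) Assuming e_sieve(n)
# returns the primes up to n, the count would plausibly finish along these lines,
# using only the names already defined in the snippet:
for p in primes_under_mill:
    if is_circular_prime(p):
        circular_primes.add(p)
print(len(circular_primes))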
optimizer = torch.optim.Adam([
    {'params': embedding_net.parameters()},
    {'params': cls_head.parameters()}
], lr=0.001)

# embedding_net, cls_head, optimizer, start_epoch = load_checkpoint(embedding_net, cls_head, optimizer,
#     '/data/liuyong/TianPinzhuo/IJCAI/r2d2_office31_5way/epoch_30.pth.tar')

max_val_acc = 0.0
x_entropy = torch.nn.CrossEntropyLoss()
timer = Timer()

for epoch in range(1, opt.num_epoch + 1):
    _, _ = [x.train() for x in (embedding_net, cls_head)]

    train_accuracies = []
    train_losses = []

    for i, batch in enumerate(tqdm(dloader_train(epoch)), 1):
        data_support, labels_support, data_query, labels_query, _, _ = [
            x.cuda() for x in batch
        ]
        data_support = data_support.float()
def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device="cpu"):
    """HPC benchmarks for Python

    Usage:

        $ python run.py benchmarks/<BENCHMARK_FOLDER>

    Examples:

        $ taskset -c 0 python run.py benchmarks/equation_of_state
        $ python run.py benchmarks/equation_of_state -b numpy -b jax --device gpu

    More information:

        https://github.com/dionhaefner/pyhpc-benchmarks
    """
    try:
        bm_module, bm_identifier = get_benchmark_module(benchmark)
    except ImportError as e:
        click.echo(f"Error while loading benchmark {benchmark}: {e!s}", err=True)
        raise click.Abort()

    available_backends = set(bm_module.__implementations__)

    if len(backend) == 0:
        backend = available_backends.copy()
    else:
        backend = set(backend)

    unsupported_backends = [b for b in backend if b not in available_backends]
    for b in unsupported_backends:
        click.echo(f'Backend "{b}" is not supported by chosen benchmark (skipping)', err=True)
        backend.remove(b)

    for b in backend.copy():
        try:
            with setup_functions[b](device=device) as bmod:
                click.echo(f"Using {b} version {bmod.__version__}")
        except BackendNotSupported as e:
            click.echo(f'Setup for backend "{b}" failed (skipping), reason: {e!s}', err=True)
            backend.remove(b)

    try:
        check_backend_conflicts(backend, device)
    except BackendConflict as exc:
        click.echo(f"Backend conflict: {exc!s}", err=True)
        raise click.Abort()

    runs = sorted(itertools.product(backend, size))

    if len(runs) == 0:
        click.echo("Nothing to do")
        return

    timings = {run: [] for run in runs}

    if repetitions is None:
        click.echo("Estimating repetitions...")
        repetitions = {}
        for b, s in runs:
            # use end-to-end runtime for repetition estimation
            def run_func():
                run = bm_module.get_callable(b, s, device=device)
                with setup_functions[b](device=device):
                    run()
            repetitions[(b, s)] = estimate_repetitions(run_func)
    else:
        repetitions = {(b, s): repetitions for b, s in runs}

    all_runs = list(
        itertools.chain.from_iterable([run] * (repetitions[run] + burnin) for run in runs))
    random.shuffle(all_runs)

    results = {}
    checked = {r: False for r in runs}

    pbar = click.progressbar(label=f"Running {len(all_runs)} benchmarks...", length=len(runs))

    try:
        with pbar:
            for (b, size) in all_runs:
                with setup_functions[b](device=device):
                    run = bm_module.get_callable(b, size, device=device)
                    with Timer() as t:
                        res = run()

                # YOWO (you only warn once)
                if not checked[(b, size)]:
                    if size in results:
                        is_consistent = check_consistency(
                            results[size], convert_to_numpy(res, b, device))
                        if not is_consistent:
                            click.echo(
                                f"\nWarning: inconsistent results for size {size}",
                                err=True,
                            )
                    else:
                        results[size] = convert_to_numpy(res, b, device)
                    checked[(b, size)] = True

                timings[(b, size)].append(t.elapsed)
                pbar.update(1.0 / (repetitions[(b, size)] + burnin))

            # push pbar to 100%
            pbar.update(1.0)

        for run in runs:
            assert len(timings[run]) == repetitions[run] + burnin

    finally:
        stats = compute_statistics(timings)
        click.echo(format_output(stats, bm_identifier, device=device))
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.multioutput import MultiOutputClassifier
from sklearn.naive_bayes import GaussianNB
from utilities import Timer, MetaData, ResultsWriter

# file properties
# -----------------------------------------------------
filePath = '../data/consolidated_clean_all.txt'

metadata = MetaData()
dataType = metadata.getProcessedColsDataType()

timer = Timer()
startTime = timer.getTime()
print('Start Time : ', timer.getTime())  # Get the start time for tracking purposes

print('------------------------------------------------------------')
print('Reading files ... ')
print('------------------------------------------------------------')
# Note that this is a numpy structured array as the data set contains both int and float
# http://docs.scipy.org/doc/numpy/user/basics.rec.html
data = np.genfromtxt(filePath, delimiter=',', skip_header=1, dtype=dataType)
df = pd.DataFrame(data)

df.ix[:, :31] = (df.ix[:, :31] - df.ix[:, :31].mean()) / (df.ix[:, :31].max() - df.ix[:, :31].min())
subj = df.ix[:, -2]
import numpy as np
import pandas as pd
from utilities import Timer, MetaData

# file properties
# -----------------------------------------------------
filePath = '../data/consolidated_all.txt'
outputFile = '../data/consolidated_clean_all.txt'

metadata = MetaData()
dataType = metadata.getOriginalColsDataType()

timer = Timer()
startTime = timer.getTime()
print('Start Time : ', timer.getTime())  # Get the start time for tracking purposes

print('------------------------------------------------------------')
print('Reading files ... ')
print('------------------------------------------------------------')
# Note that this is a numpy structured array as the data set contains both int and float
# http://docs.scipy.org/doc/numpy/user/basics.rec.html
# activityData = np.genfromtxt(filePath, delimiter=',', skip_header=1, dtype=dataType)
activityData = np.loadtxt(filePath, delimiter=',', skiprows=1, dtype=dataType)
print('loading Time : ', timer.getTime())

# convert to pandas data frame
df = pd.DataFrame(activityData)

# count missing values in df
print('--------------------------------------')