Example #1
    def create_cnn():
        architecture = CNN.Architecture(final_filter_size=768 // 2, num_repeat_normal=6, num_modules=3)
        with utils.Timer("CNN construct", lambda x: logger.debug(x)):
            cnn = CNN(input_channels=3, height=32, width=32, output_classes=10, gpu=gpu, num_cell_blocks=num_cell_blocks,
                      architecture=architecture)
            cnn.train()

        with utils.Timer("Optimizer Construct", lambda x: logger.debug(x)):
            dropout_opt = DropoutSGD([cnn.dag_variables, cnn.reducing_dag_variables], connections=cnn.all_connections,
                                     lr=8 * 25 * 10, weight_decay=0)

            dropout_opt.param_groups[0]['lr'] /= (architecture.num_repeat_normal*architecture.num_modules/(architecture.num_modules-1))

            # cnn_optimizer = AdamShared(cnn.parameters(), lr=0.00001, weight_decay=1e-5, gpu=gpu)
            # cnn_optimizer = AdamShared(cnn.parameters(), lr=l_max, weight_decay=1e-4, gpu=gpu)

            # Follows ENAS
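            # Cosine annealing with warm restarts: the learning rate decays from
            # l_max to l_min over a period that starts at period_start epochs and
            # is multiplied by period_multiplier after every restart.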
            l_max = 0.05
            l_min = 0.001
            period_multiplier = 2
            period_start = 10
            cnn_optimizer = SGDShared(cnn.parameters(), lr=l_max, momentum=0.9, dampening=0, weight_decay=1e-4,
                                      nesterov=True, gpu_device=gpu)
            learning_rate_scheduler = CosineAnnealingRestartingLR(optimizer=cnn_optimizer, T_max=period_start,
                                                                  eta_min=l_min, period_multiplier=period_multiplier)

        return cnn, cnn_optimizer, dropout_opt, learning_rate_scheduler
Example #2
    def __init__(self,
                 x=None,
                 y=None,
                 weather=None,
                 weather_list=None,
                 in_game_time=None,
                 view_mode=None,
                 timeout=30,
                 safety_timeout=1.0,
                 seed=None,
                 **kwargs):
        self.x = x or h_utils.random_point(0.0, constants.MIN_X,
                                           constants.MAX_X)
        self.y = y or h_utils.random_point(0.0, constants.MIN_Y,
                                           constants.MAX_Y)
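        # NOTE: the `or`-based defaults above fall back to a random value whenever
        # the argument is falsy, e.g. an explicit 0.0 for x or y.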
        self.weather = weather or random.choice(weather_list
                                                or constants.WEATHER_LIST)
        self.in_game_time = in_game_time or random.randint(0, 24 * 60)

        if view_mode is not None:
            self.view_mode = view_mode
        else:
            self.view_mode = constants.ViewMode.FIRST_PERSON

        self.timeout_timer = utils.Timer(timeout)
        self.make_safe_timer = utils.Timer(safety_timeout)
        self.seed = seed or random.randint(0, 1000000)
        self.should_keep_running = True
Example #3
    def __init__(self, image, target):
        People.__init__(self, image)
        self.ep = random.randint(50, 250)
        x, y = 0, 0
        off_x = [-100, 900]
        off_y = [-100, 700]

        hide_at_x = random.random() < 0.5
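        # 50/50 choice: spawn just beyond the left/right edge with a random vertical
        # position, or just beyond the top/bottom edge with a random horizontal one.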
        if hide_at_x:
            x = random.choice(off_x)
            y = random.randint(0, 600)
        else:
            x = random.randint(0, 800)
            y = random.choice(off_y)

        pos = (x, y)
        self.set_position(pos)
        self.target = target
        self.ticks = pygame.time.get_ticks()
        self.move_speed = 1.0
        self.points = random.randint(1, 10)
        self.dead = False
        self.busy = False
        self.busy_timer = utils.Timer(10)
        self.busy_timer.deactivate()
        self.stun_flag = False
        self.stun_timer = utils.Timer(10)
        self.stun_timer.deactivate()
        self.hp = self.hp_max
        self.hit_flag = False
        self.hit_timer = utils.Timer(10)
        self.hit_timer.deactivate()
Example #4
def main():
    if not os.path.exists(OUT_DIR):
        os.mkdir(OUT_DIR)
    total_timer = utils.Timer()
    total_timer.start()
    scene = set_scene()
    main_camera = scene.get_main_camera()
    # ------------------------------------------------------------------------
    # Illuminate
    timer = utils.Timer()
    timer.start()
    LIGHT_SCREEN_WIDTH = 1024
    LIGHT_SCREEN_HEIGHT = 1024
    # n0 = np.array([0, 0, -1])
    # n1 = utils.normalize(np.array([1, 1, 0]))
    n0 = DEFAULT_N0
    n1 = DEFAULT_N2
    backward_raytracing(scene, scene.lights[0], LIGHT_SCREEN_WIDTH,
                        LIGHT_SCREEN_HEIGHT, n0, n1)
    timer.stop()
    print(f"Total time spent illuminating: {timer}")
    for i in range(len(scene.objects)):
        obj = scene.objects[i]
        if obj.material.illumination_map:
            illum_map = obj.material.illumination_map.data
            img_output = Image.fromarray(illum_map)
            filename = f"{OUT_DIR}/5_illumination_map_{i}.jpg"
            img_output.save(filename, quality=MAX_QUALITY)
    # Load saved illumination maps
    # for i in range(2, 8):
    #     filename = f"5_illumination_map_{i}.jpg"
    #     scene.objects[i].material.illumination_map.load(filename)
    # ------------------------------------------------------------------------
    # Rendering
    if debug:
        V_SAMPLES = 1
        H_SAMPLES = 1
    else:
        V_SAMPLES = 8
        H_SAMPLES = 8
    timer = utils.Timer()
    timer.start()
    print("Running pathtracer")
    screen = render_aa_t(scene, main_camera, pathtrace, SCREEN_HEIGHT,
                         SCREEN_WIDTH, V_SAMPLES, H_SAMPLES)
    # screen = render_aa_t(
    #     scene, main_camera, raytrace, SCREEN_HEIGHT, SCREEN_WIDTH, V_SAMPLES,
    #     H_SAMPLES
    # )
    # screen = render_mp(scene, main_camera, SCREEN_HEIGHT, SCREEN_WIDTH)
    timer.stop()
    # ------------------------------------------------------------------------
    print(f"Total time spent rendering: {timer}")
    img_output = Image.fromarray(screen)
    img_output.save(OUTPUT_IMG_FILENAME, quality=MAX_QUALITY)
    print(f"Output image created in: {OUTPUT_IMG_FILENAME}")
    total_timer.stop()
    print(f"Total time in the program: {total_timer}")
Example #5
 def __init__(self, images, pos, type):
     self.type = type
     self.images = images
     self.img_nr = 0
     self.image = self.images[self.img_nr]
     Moveable.__init__(self, self.image)
     self.set_position(pos)
     self.hover = False
     self.anim_timer = utils.Timer(40)
     self.tele_timer = utils.Timer(40)
     self.tele_timer.deactivate()
     self.timer = utils.Timer(400)
Example #6
    def __init__(self,
                 a_peltier_number: PeltierNumber,
                 a_netvars: NetworkVariables,
                 a_ready_hold_timer: int = 10,
                 a_wait_peltier_timeout_s: int = 10,
                 a_timeout_s: int = 30):
        super().__init__()

        if a_peltier_number == PeltierTest.PeltierNumber.FIRST:
            self.temp_setpoint = a_netvars.peltier_1_temperature_setpoint
            self.temp_max = a_netvars.peltier_1_temperature_max
            self.current_temp = a_netvars.peltier_1_temperature
            self.polarity_pin = a_netvars.peltier_1_invert_polarity
            self.is_peltier_ready = a_netvars.peltier_1_ready
        elif a_peltier_number == PeltierTest.PeltierNumber.SECOND:
            self.temp_setpoint = a_netvars.peltier_2_temperature_setpoint
            self.temp_max = a_netvars.peltier_2_temperature_max
            self.current_temp = a_netvars.peltier_2_temperature
            self.polarity_pin = a_netvars.peltier_2_invert_polarity
            self.is_peltier_ready = a_netvars.peltier_2_ready
        elif a_peltier_number == PeltierTest.PeltierNumber.THIRD:
            self.temp_setpoint = a_netvars.peltier_3_temperature_setpoint
            self.temp_max = a_netvars.peltier_3_temperature_max
            self.current_temp = a_netvars.peltier_3_temperature
            self.polarity_pin = a_netvars.peltier_3_invert_polarity
            self.is_peltier_ready = a_netvars.peltier_3_ready
        else:  # a_peltier_number == PeltierTest.PeltierNumber.FOURTH
            self.temp_setpoint = a_netvars.peltier_4_temperature_setpoint
            self.temp_max = a_netvars.peltier_4_temperature_max
            self.current_temp = a_netvars.peltier_4_temperature
            self.polarity_pin = a_netvars.peltier_4_invert_polarity
            self.is_peltier_ready = a_netvars.peltier_4_ready

        self.setpoint_shift = 5
        self.prev_setpoint = self.temp_setpoint.get()
        self.start_temp = self.current_temp.get()
        self.expected_temp = 0
        self.error_message = ""

        self.temp_has_not_changes_window_percent = 5
        self.wrong_polarity_window_percents = 20

        self.netvars = a_netvars
        self.__timeout_s = a_timeout_s

        self.wait_peltier_timer = utils.Timer(a_wait_peltier_timeout_s)
        self.ready_hold_timer = utils.Timer(a_ready_hold_timer)
        self.__status = ClbTest.Status.NOT_CHECKED
        self.__stage = PeltierTest.Stage.TEMP_UP
Example #7
    def __init__(self, pos, radius, color, controller=None, can_suffer=False):
        super(QAgent, self).__init__(pos, radius, color)

        self.reward_monitor = utils.ValueMonitor()
        self.controller = controller
        self.can_suffer = can_suffer
        self.move_timer = utils.Timer(config.STEP_TIME)
Example #8
def bert2gram_decoder(args, data_loader, dataset, model, test_input_refactor,
                      pred_arranger, mode):
    logging.info(' Bert2Gram : Start Generating Keyphrases for %s  ...' % mode)
    test_time = utils.Timer()

    tot_examples = 0
    tot_predictions = []

    for step, batch in enumerate(tqdm(data_loader)):

        inputs, indices, lengths = test_input_refactor(batch,
                                                       model.args.device)
        try:
            logit_lists = model.test_bert2gram(inputs, lengths,
                                               args.max_phrase_words)
        except:
            logging.error(str(traceback.format_exc()))
            continue

        # decode logits to phrase per batch
        params = {
            'examples': dataset.examples,
            'logit_lists': logit_lists,
            'indices': indices,
            'max_phrase_words': args.max_phrase_words,
            'return_num': Decode_Candidate_Number[args.dataset_class],
            'stem_flag': False
        }

        batch_predictions = generator.gram2phrase(**params)
        tot_predictions.extend(batch_predictions)

    candidate = pred_arranger(tot_predictions)
    return candidate
Example #9
def run_manager(worker_threads, sess, lr, step_counter, update_counter, log_dir, saver,
                wake_interval_seconds, ckpt_interval_seconds):
    # os.path.join joins path components intelligently: the result is their concatenation
    # with exactly one separator between non-empty parts; an absolute component discards
    # everything before it.
    checkpoint_file = osp.join(log_dir, 'checkpoints', 'network.ckpt')
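    # e.g. osp.join('/tmp/run', 'checkpoints', 'network.ckpt') -> '/tmp/run/checkpoints/network.ckpt' on POSIX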

    ckpt_timer = utils.Timer(duration_seconds=ckpt_interval_seconds)
    ckpt_timer.reset()

    step_rate = utils.RateMeasure()
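    # Measures how many environment steps are taken per second between wake-ups.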
    step_rate.reset(int(step_counter))

    while True:
        time.sleep(wake_interval_seconds)

        steps_per_second = step_rate.measure(int(step_counter))
        easy_tf_log.tflog('misc/steps_per_second', steps_per_second)
        easy_tf_log.tflog('misc/steps', int(step_counter))
        easy_tf_log.tflog('misc/updates', int(update_counter))
        easy_tf_log.tflog('misc/lr', sess.run(lr))

        alive = [t.is_alive() for t in worker_threads]

        if ckpt_timer.done() or not any(alive):
            saver.save(sess, checkpoint_file, int(step_counter))
            print("Checkpoint saved to '{}'".format(checkpoint_file))
            ckpt_timer.reset()

        if not any(alive):
            break
Example #10
 def __init__(self, quit_callback=None):
     super(HappyMacStatusBarApp, self).__init__("", quit_button=None)
     self.quit_button = None
     self.quit_callback = quit_callback
     self.menu = []
     self.loading()
     self.menu._menu.setDelegate_(self)
     self.start = time.time()
     self.need_menu = False
     self.last_menu_item = None
     self.last_highlight_change = time.time()
     utils.set_menu_open(False)
     utils.Timer(1.0, self.main_update).start()
     utils.Timer(10.0, process.cache_processes, False).start()
     process.cache_processes()
     log.log("Started HappyMac %s" % version_manager.last_version())
Example #11
def evaluate(args, data_loader, model, stats, writer):
    logger.info("start evaluate valid ( %d epoch ) ..." % stats['epoch'])

    epoch_time = utils.Timer()

    epoch_loss = 0
    epoch_step = 0

    for step, batch in enumerate(tqdm(data_loader)):
        try:
            loss = model.predict(batch)
        except:
            logging.error(str(traceback.format_exc()))
            continue

        epoch_loss += loss
        epoch_step += 1

    eval_loss = float((epoch_loss / epoch_step))

    if args.local_rank in [-1, 0] and args.use_viso:
        writer.add_scalar('valid/loss', eval_loss, stats['epoch'])

    logging.info(
        'Valid Evaluation | Epoch Mean Loss = %.4f ( Epoch = %d ) | Time for epoch = %.2f (s) \n'
        % (eval_loss, stats['epoch'], epoch_time.time()))

    return eval_loss
Example #12
def avgAcc(numTrial, dataloader, trainFunc, params):
    trainSet, testSet, valSet = dataloader.readData()
    trainAcc = []
    valAcc = []
    cateAcc = []
    timer = utils.Timer()
    for i in range(1, numTrial+1):
        print(f"Trial {i} starts at {timer()}")
        ta, va, ca = trainFunc(dataloader, trainSet, testSet, valSet, params)
        print(f"Trail {i} result:")
        print(f"Train accuracy {ta}")
        print(f"Val accuracy {va}")
        print(f"Cate accuracy {ca}")
        trainAcc.append(ta)
        valAcc.append(va)
        cateAcc.append(ca)
    trainAcc = tf.reduce_mean(tf.stack(trainAcc))
    valAcc = tf.reduce_mean(tf.stack(valAcc))
    cateAcc = tf.reduce_mean(tf.stack(cateAcc), axis=0)
    print(f"Finished {timer()} {sys.argv}")
    print("Overall train accuracy %.4f" % (float(trainAcc)))
    print("Overall val accuracy %.4f" % (float(valAcc)))
    print("Overall cate accuracy ", end='')
    for i in cateAcc[:-1]:
        print("%.2lf," % i, end=' ')
    print("%.3f" % cateAcc[-1])
Example #13
def generate_candidates(dataset, data_loader, model, mode):
    logging.info(' Start Generating Keyphrases for %s  ...' % mode)
    test_time = utils.Timer()

    tot_examples = 0
    tot_predictions = []
    for step, batch in enumerate(tqdm(data_loader)):
        try:
            logit_lists = model.test(batch)
        except:
            logging.error(str(traceback.format_exc()))
            continue

        example_indices, example_lengths = batch[-1], batch[-2]
        for batch_id, logit_list in enumerate(logit_lists):
            example = dataset.examples[example_indices[batch_id]]

            # example info
            valid_length = example_lengths[batch_id]
            cut_tokens = example['orig_tokens'][:valid_length]

            # decode tag to phrase and sort
            n_best_phrases, n_best_scores = decode_n_best_candidates(
                cut_tokens, logit_list, converter, args.max_phrase_words, 100)
            candidate_KP = remove_empty(n_best_phrases)[:5]
            assert len(candidate_KP) == 5

            # log groundtruths & predictions
            tot_predictions.append((example['url'], candidate_KP))
            tot_examples += 1
    logging.info(
        'Dataset: %s Finish Generation | Total Examples = %d | predict time = %.2f (s)'
        % (mode, tot_examples, test_time.time()))
    return tot_predictions
Example #14
    def process(self):
        time_statictics = {}
        time_statictics['network_time'] = []
        time_statictics['cpu_time'] = []
        time_statictics['disk_time'] = []

        timer = utils.Timer()
        # 1. Load the list of image URLs
        url_list = utils.urllist()
        # 2. Dispatch the download module
        timer.tick()
        content_list = self.downloader.process(url_list)
        time_cost = timer.tock()
        time_statictics['network_time'].append(time_cost)

        # 3. Dispatch the hashing module
        timer.tick()
        md5_list = self.hasher.process(content_list)
        time_cost = timer.tock()
        time_statictics['cpu_time'].append(time_cost)

        # 4. Dispatch the storage module
        item_list = []
        for content, md5 in zip(content_list, md5_list):
            path = self._wrap_path(md5)
            item = (content, path)
            item_list.append(item)
        timer.tick()
        self.storager.process(item_list)
        time_cost = timer.tock()
        time_statictics['disk_time'].append(time_cost)
        return time_statictics
Example #15
    def __init__(self, motion_model=None):
        self.simulation_rate = rospy.get_param("~simulation_rate")
        start_state = map(float, rospy.get_param("~start_state").split(","))

        print "Starting at state:", start_state

        # origin is the back right wheel of the car if the car is pointing along the positive x axis
        # 0.3 meters wide, 0.55 meters long
        # self.dimensions = np.array([0.55, 0.3])
        self.dimensions = np.array([0.5, 0.25])
        # center of lidar, centered side to side and 0.14 meters from front
        self.lidar_position = np.array([0.41, 0.15])
        # point between the rear tires, used as the state in value iteration
        self.base_frame = np.array([0.11, 0.15])

        # x, y, theta, steering angle, throttle, velocity
        self.state = np.zeros(6)
        self.state[:3] = start_state
        self.iter = 0
        self.last_update = 0.0
        self.lock = Lock()

        self.derivatives = None
        self.local_linear_acceleration = None

        if motion_model is None:
            print "ERROR: Must provide a motion model to the RACECAR simulator"
        self.motion_model = motion_model

        self.simulation_timer = utils.Timer(20)
Example #16
 def __call__(self, batch):
     timer = utils.Timer()
     timer.tic()
     rawids_list = [self.tokenizer.encode(t) for t in batch]
     ids, labels, paddings = gen_casual_targets(rawids_list)
     logging.debug("Text Processing Time: {}s".format(timer.toc()))
     return ids, labels, paddings
Example #17
    def train(self):
        timer = utils.Timer()
        self.best_cvloss = 9e20
        if self.cv_loss:
            self.best_cvloss = min(self.cv_loss)

        while self.epoch < self.num_epoch:
            timer.tic()
            self.epoch += 1
            logging.info("Training")
            tr_loss = self.iter_one_epoch()
            tr_msg = ("tr loss: {:.4f}").format(tr_loss)
            msg = "\n" + "-"*85 + "\n"
            msg += "Epoch {} Training Summary:\n{}\n".format(self.epoch, tr_msg)
            msg += "-"*85
            logging.info(msg)
            self.save(os.path.join(self.exp_dir, "ep-{:04d}.pt".format(self.epoch)))
            self.save(os.path.join(self.exp_dir, "last.pt"))
            logging.info("Validation")
            cv_loss = self.iter_one_epoch(cross_valid=True)

            if self.best_cvloss > cv_loss:
                self.best_cvloss = cv_loss
            train_time = timer.toc()
            cv_msg = ("cv loss: {:.4f} | best cv loss {:.4f}").format(cv_loss, self.best_cvloss)
            msg = "\n" + "-"*85 + "\n"
            msg += "Epoch {} Validation Summary:\n{}\n".format(self.epoch, cv_msg)
            msg += "Time cost: {:.4f} min".format(train_time/60.)
            msg += "\n" + "-"*85 + '\n'
            logging.info(msg)
            self.tr_loss.append(tr_loss)
            self.cv_loss.append(cv_loss)

            if self.num_last_ckpt_keep:
                utils.cleanup_ckpt(self.exp_dir, self.num_last_ckpt_keep)
Example #18
File: train.py Project: xllg/Ernn
def train(args, data_loader, model, global_stats):
    """
    Run through one epoch of model training with the provided data loader.
    :param args:
    :param data_loader:
    :param model:
    :param global_stats:
    :return:
    """
    # Initialize meters + timers
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()

    # Run one epoch
    for idx, ex in enumerate(data_loader):
        train_loss.update(*model.update(ex))
        if idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], idx, len(data_loader)) +
                        'loss = %.2f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            train_loss.reset()

    logger.info('train: Epoch %d done. Time for epoch = %.2f' %
                (global_stats['epoch'], epoch_time.time()))

    # Checkpoint
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)
Example #19
    def test_down_down_x4_naive_large_aa(self) -> None:
        x = torch.randn(1, 3, 2048, 2048)
        with utils.Timer('(2048, 2048) RGB to (512, 512) with AA (Naive): {}'):
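            # The '{}' placeholder is presumably filled in with the measured time
            # when the Timer context manager exits.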

            x = self.imresize(x, sizes=(512, 512), antialiasing=True)

        return
Example #20
def adapt_user(s2s, trainer, train_src, train_trg, test_src, opt):
    timer = utils.Timer()
    log = utils.Logger(opt.verbose)
    n_train = len(train_src)
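    # Total number of target tokens, not counting one symbol (presumably BOS/EOS) per sentence.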
    n_tokens = (sum(map(len, train_trg)) - len(train_trg))
    # Train for n_iter
    timer.restart()
    best_ppl = np.inf
    for epoch in range(opt.num_epochs):
        timer.tick()
        dy.renew_cg()
        losses = []
        # Add losses for all samples
        for x, y in zip(train_src, train_trg):
            losses.append(
                s2s.calculate_user_loss([x], [y], [0],
                                        update_mode=opt.update_mode))
        loss = dy.average(losses)
        # Backward + update
        loss.backward()
        trainer.update()
        # Print loss etc...
        train_loss = loss.value() / n_tokens
        train_ppl = np.exp(train_loss)
        trainer.status()
        elapsed = timer.tick()
        log.info(" Training_loss=%f, ppl=%f, time=%f s, tok/s=%.1f" %
                 (train_loss, train_ppl, elapsed, n_tokens / elapsed))
        if train_ppl < best_ppl:
            best_ppl = train_ppl
            translations = evaluate_model(s2s, test_src, opt.beam_size)
        else:
            log.info("Early stopping after %d iterations" % (epoch + 1))
            break
    return translations
Example #21
 def run(self):
     AS = Basic_Battle.App_Status
     wt = self.wait_time
     while True:
         sleep(wt)
         timer = utils.Timer()
         current_app_status = self.single_run()
         log.info('Single run was finished, cost around %d ms.',
                  timer.point())
         log.info(
             'Max run times: %.0f, current run times: %d, win times: %d.',
             self.max_runtimes if self.max_runtimes >= 0 else float('inf'),
             self.current_runtimes, self.current_winwimes)
         self._report()
         if current_app_status == AS.STOP:
             break
         if current_app_status == AS.CONTINUE:
             continue
         if current_app_status == AS.UNKNOWN and self.last_status == AS.UNKNOWN:
             self.constant_unknown_status += 1
             if self.constant_unknown_status == self.max_constant_unknown_status:
                 log.info(
                     'UNKNOWN status keeps %d times. Write the screen img to disk.',
                     self.constant_unknown_status)
                 write_tmp_images()
             if self.constant_unknown_status >= self.max_constant_unknown_status:
                 wt *= 1.1
                 log.info('Retry... Wait around %d ms', wt)
         else:
             self.constant_unknown_status = 0
             wt = self.wait_time
         self.last_status = current_app_status
Example #22
def validate_unofficial(args, data_loader, model, global_stats, mode):
    """Run one full unofficial validation.
    Unofficial = doesn't use SQuAD script.
    """
    eval_time = utils.Timer()
    start_acc = utils.AverageMeter()
    end_acc = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Make predictions
    examples = 0
    for ex in data_loader:
        batch_size = ex[0].size(0)
        with torch.no_grad():
            pred_s, pred_e, _ = model.predict(ex)
            target_s, target_e = ex[-3:-1]

            # We get metrics for independent start/end and joint start/end
            accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e)
            start_acc.update(accuracies[0], batch_size)
            end_acc.update(accuracies[1], batch_size)
            exact_match.update(accuracies[2], batch_size)

            # If getting train accuracies, sample max 10k
            examples += batch_size
            if mode == 'train' and examples >= 1e4:
                break

    logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' %
                (mode, global_stats['epoch'], start_acc.avg) +
                'end = %.2f | exact = %.2f | examples = %d | ' %
                (end_acc.avg, exact_match.avg, examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return {'exact_match': exact_match.avg}
Example #23
    def __init__(self, a_tests: List[tests_base.ClbTest]):
        super().__init__()

        self.tests = a_tests
        self.test_results = [TestResults() for _ in range(len(self.tests))]

        self.enabled_tests = []
        self.prepare_timer = utils.Timer(1.5)
        self.timeout_timer = utils.Timer(30)

        # Runs in cyclic mode
        self.read_graphs_time = utils.Timer(0.1)
        self.read_graphs_time.start()

        self.__started = False
        self.current_test_idx = 0
Example #24
    def process(self):
        time_statictics = {
            "network_time": [],
            "cpu_time": [],
            "disk_time": [],
        }
        timer = utils.Timer()

        url_list = utils.url_list()

        timer.tick()
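        # Download stage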
        content_list = self.downloader.process(url_list)
        time_cost = timer.tock()
        time_statictics["network_time"].append(time_cost)

        timer.tick()
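        # Hashing stage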
        md5_list = self.hasher.process(content_list)
        time_cost = timer.tock()
        time_statictics["cpu_time"].append(time_cost)

        item_list = []
        for content, md5 in zip(content_list, md5_list):
            path = self._wrap_path(md5)
            item = (content, path)
            item_list.append(item)

        timer.tick()
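        # Storage stage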
        self.storager.process(item_list)
        time_cost = timer.tock()
        time_statictics["disk_time"].append(time_cost)

        return time_statictics
Example #25
def extract_plain_text(path, english_only=False, chinese_only=False):

    timer = utils.Timer()
    timer.start()

    subs = utils.load_sub_file(path)
    plaintext = utils.get_plaintext(subs)

    if english_only and chinese_only:
        print(
            '仅保留中文和仅保留英文不能同时勾选\nChinese only and English only cannot be checked at the same time'
        )
        sys.exit(0)

    elif chinese_only:
        chinese_lines = []
        for i in range(len(plaintext)):
            chinese_lines.append(utils.chinese_only(plaintext[i]) + '\n')
        utils.write_lines('%s.txt' % (output_file_name), chinese_lines)

    elif english_only:
        english_lines = []
        for i in range(len(plaintext)):
            english_lines.append(utils.english_only(plaintext[i]) + '\n')
        utils.write_lines('%s.txt' % (output_file_name), english_lines)

    else:
        utils.write_lines('%s.txt' % (output_file_name), plaintext)

    timer.stop()

    print('提取完成,用时%.2f秒' % (timer.elapsed))  # "Extraction finished, took %.2f seconds"
Example #26
	def get(self):
		'''
		For an ajax call to create a playlist
		Also serves the first n tracks in the playlist
		'''
		timer = utils.Timer()
		time = timer.time
		
		station_tags,serendipity,city = self.get_station_meta_from_session()
		time('b_get_station_meta')
		
		# create the station!!
		station = utils.StationPlayer(station_tags,serendipity,city)
		session,algo_times = station.create_station()
		
		time('d_create_station')
		# pull the first 10 tracks
		artists = self.fetch_next_n_artists(10,session)
		packaged_artists = self.package_artist_multi(artists)
		
		time('e_package_artists')
		global_times = timer.get_times()
		
		logging.info('{} tracks in the playlist'.format(station.sorted_tracks_list.__len__()))
		logging.info(json.dumps({'global':global_times,'algo':algo_times}))
		self.response.out.write(json.dumps(packaged_artists))
Example #27
def srt2ss(path, is_ass=True):

    timer = utils.Timer()
    timer.start()

    subs = utils.load_sub_file(path)

    start_time = utils.get_start_time(subs, 'ass')
    end_time = utils.get_end_time(subs, 'ass')
    plaintext = utils.get_plaintext(subs)

    sub_block = []
    LAYER = 0
    STYLE = 'Default'
    NAME = ''
    MARGINL = 0
    MARGINV = 0
    EFFECT = ''
    for i in range(len(subs)):
        sub_block.append('Dialogue: %d, %s, %s, %s, %s, %d, %d, %s, %s' %
                         (LAYER, start_time[i], end_time[i], STYLE, NAME,
                          MARGINL, MARGINV, EFFECT, plaintext[i]))
    utils.write_txt('%s.ass' % (output_filename), script_info())
    utils.write_lines('%s.ass' % (output_filename), sub_block, mode='a')

    timer.stop()

    print('转换完成,用时%.2f秒' % (timer.elapsed))  # "Conversion finished, took %.2f seconds"
Example #28
    def _restart_server_state(self):
        self.toggle_agent(False)

        self.server_key = 1337
        self.can_start = False
        self.sync_mode = False

        self._is_env_running = False
        self._env_class = scenarios.DrivingPointGoal
        self._env = None

        self._is_agent_running = False
        self._agent_class = agents_privileged.Dummy
        self._agent = None
        self._agent_status = constants.AgentStatus.NOT_STARTED

        # Hack.
        self.scenario_params = presets.Driving.SCENE_1
        self.scenario_params_int = dict()
        self.scenario_params_float = dict()

        self.server_action = (0,)
        self.client_action = (0,)
        self.state = dict()
        self.targets = dict()

        self.prev_action_hash = float('inf')
        self._action_timer = utils.Timer(1.0 / 8.0)

        logging.info('Server state reset.')
Example #29
def encode_docs_qs(args, data_loader, model, global_stats, mode):
    """Encode given documents.
    """
    eval_time = utils.Timer()

    # Encode documents
    examples = 0
    documents = []
    questions = []
    with torch.no_grad():
        for ex in data_loader:
            docs, qs = model.encode(ex)
            batch_size = ex[0].size(0)
            examples += batch_size

            # Save encoded documents and questions
            documents.append(docs)
            questions.append(qs)

        documents = torch.cat(documents, 0)
        questions = torch.cat(questions, 0)
    logger.info('%s valid encoding: Epoch = %d | encoded = %d | ' %
                (mode, global_stats['epoch'], documents.size(0)) +
                'examples = %d | ' % (examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return documents, questions
Example #30
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.n = 1000
        self.butterfly = utils.get_img(path.join('example', 'butterfly.png'))
        # Batching
        self.butterfly = self.butterfly.repeat(16, 1, 1, 1)
        self.m = torch.Tensor([
            [3.2, 0.016, -68],
            [1.23, 1.7, -54],
            [0.008, 0.0001, 1],
        ])
        if cuda.is_available():
            self.butterfly = self.butterfly.cuda()
            self.m = self.m.cuda()

            with utils.Timer('Warm-up: {}'):
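                # Warm-up pass: run the warp repeatedly so one-off CUDA initialisation
                # and kernel caching do not distort later measurements.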
                for _ in range(100):
                    _ = core_warp.warp(
                        self.butterfly,
                        self.m,
                        sizes='auto',
                        kernel='bicubic',
                        fill_value=0,
                    )

                cuda.synchronize()