Example #1
def run():
    """
    Prepares and runs the whole system.
    """
    args = parse_args()

    logger = logging.getLogger("brc")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if args.log_path:
        file_handler = logging.FileHandler(args.log_path)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

# string=",".join([g_num for g_num in args.gpus])
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = string
    logger.info('Running with args : {}'.format(args))
    if args.prepare:
        prepare(args)
    if args.train:
        train(args)
    if args.evaluate:
        evaluate(args)
    if args.predict:
        predict(args)
Example #2
def main():
    # Create Metadata
    metadata = Metadata()
    # Create Replay Buffer
    g_buffer = ReplayMem(metadata)

    with Game(metadata) as game:
        # Create Agent
        agent = DDQLAgent(game.env.action_space.n)

        # Load files
        file_path = Path(Constants.MODEL_PATH)

        metadata_file = (file_path / "metadata.p")
        if all([x.validate_load()
                for x in [g_buffer, agent]]) and metadata_file.is_file():
            print("Loading state...")
            with metadata_file.open("rb") as f:  # avoid shadowing the Path object
                metadata = pickle.load(f)  # Metadata
            game.load_metadata(metadata)
            g_buffer.load(metadata)
            agent.load()

        print("Running Eval...")
        evaluate(game, metadata, agent, 0)
Example #3
def evaluate_generations(name,
                         experiment_id,
                         folder=None,
                         hops: int = 10,
                         unused_cpu: int = 2):
    """
    Evaluate the population across its lifetime. At each generation, the ten best genomes are evaluated together with
    the elite genome of the past five generations.

    :param name: Name of the population
    :param experiment_id: Experiment for which the population is trained (and now will be evaluated)
    :param folder: Population-folder (~experiment level)
    :param hops: Number of generations between each saved population
    :param unused_cpu: Number of CPU cores not used
    """
    # Fetch population and evaluation games
    folder = folder if folder else get_folder(experiment_id)
    pop = Population(
        name=name,
        folder_name=folder,
        log_print=False,
        use_backup=True,
    )
    _, game_ids_eval = get_game_ids(experiment_id=experiment_id)

    # Perform the evaluations
    max_gen = pop.generation
    for gen in tqdm(range(0, max_gen + 1, hops)):
        # Load in the current generation
        if not pop.load(gen=gen):
            raise Exception(
                f"Population {name} is not trained for generation {gen}")

        # Collect the used genomes
        if gen > 5:
            genomes = sorted(pop.population.values(),
                             key=lambda x: x.fitness if x.fitness else 0,
                             reverse=True)[:10]
            for i in range(1, 6):
                keys = [g.key for g in genomes]
                g = copy.deepcopy(
                    pop.best_genome_hist[gen - i][1])  # deep-copy, since the genome may be mutated
                while g.key in keys:  # Already added to genomes, update keys
                    g.key += 1
                genomes.append(g)
        else:
            # No elite history yet: take the fifteen most fit genomes of the
            # current generation, so the number of evaluated genomes stays the same
            genomes = sorted(pop.population.values(),
                             key=lambda x: x.fitness if x.fitness else 0,
                             reverse=True)[:15]

        # Evaluate the selected genomes
        evaluate(
            population=pop,
            games=game_ids_eval,
            genomes=genomes,
            unused_cpu=unused_cpu,
            overwrite=True,
        )
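A hedged usage sketch; the population name and experiment id below are illustrative, not taken from the source:

# Hypothetical call: re-evaluate every 10th saved generation of a
# population assumed to be named 'NEAT', trained on experiment 1.
evaluate_generations(name='NEAT', experiment_id=1, hops=10, unused_cpu=2)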
Example #4
    def test_evaluate(self):
        """> Test if the population can be evaluated."""
        # Working folder must be the repo root for make_net to load properly
        if os.getcwd().split('\\')[-1] == 'tests':
            os.chdir('..')

        pop = get_population()
        evaluate(
            population=pop,
            games=[-2],
            debug=True,
        )
Example #5
def test_evaluate_expressions_with_sublevel():
    expressions = {
        '(4+4) * 10': 80,
        '(10/10) * (-5+1)': -4,
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #6
def test_evaluate_expressions_with_two_sublevels():
    expressions = {
        '(4+(16/4)) * 10': 80,
        '((4x2+2)/(5x2)) * ((5-10)+(10-9))': -4,
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #7
def test_evaluate_simple_values():
    expressions = {
        ' + 1 ': 1,
        '-1': -1,
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #8
def test_evaluate_expressions_more_complex():
    "all expressions from https://teresachiacchio.wixsite.com/matemagica/expressoes"
    expressions = {
        '4 – [– (6 + 4) + (3 – 2 – 1)]': 14,
        '15 x 2 – 30 ÷ 3 + 7': 27,
        '10 x [30 ÷ (2 x 3 + 4) + 15]': 180,
        '25 + {14 – [25 x 4 + 40 – (20 ÷ 2 + 10)]}': -81,
        '37 – 14 + 35 – 10': 48,
        '32 ÷ 2 . 3 ÷ 4 . 5': 3.091787439613527,
        '32 ÷ 2 x 3 ÷ 4 x 5': 60,
        '180 ÷ 4 * {9 ÷ [3 * (3 * 1)]}': 45,
        '16 : (-4) x 2': -8,
        '16 x(-4): 2': -32,
        '10 + 2² x 3': 22,
        '5² – 4 x 2 + 3': 20,
        '20 – [4² + ( 2³ – 7 )]': 3,
        '10 –{ 10 + [ 8² : ( 10 – 2 ) + 3 x 2 ] }': -14,
        '27 + {14 + 3 x [100 : (18 – 4 x 2) + 7] } : 13': 32,
        '{100 – 413 x (20 – 5 x 4) + 25} : 5': 25,
        '25 + { 12 + [ 2 – ( 8 – 6 ) + 2 ]}': 39,
        '38 – { 20 – [ 22 – ( 5 + 3) + ( 7 – 4 +1)]}': 36,
        '26 + { 12 – [ ( 30 – 18) + ( 4 – 1) – 6 ] – 1 }': 28,
        '(90+10)*2 / (90+(1000/    (   50+25*2)))': 2,
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #9
def test_attribute_evaluation():
    evaluation, model_cfgs = evaluation_builder.build(attribute_evaluation_cfg)
    with torch.no_grad():
        for model_cfg in model_cfgs:
            model = model_builder.build(model_cfg)
            model = DataParallel(model)
            score = evaluate(evaluation, model, delete=True)
            print(score)
Example #10
def test_evaluate_sequence_expressions():
    expressions = {
        ' 2 x 2 / 2 ': 2,
        ' 2 / 2 * 2 ': 2,
        ' 2 + 2 / 2 ': 3,
        ' 2 / 2 + 1 ': 2,
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #11
def test_evaluate_simple_expressions():
    expressions = {
        ' 1 + 1 ': 2,
        '100 / 4': 25,
        '9 * 9': 81,
        '15 - 5': 10
    }
    for expr, result in expressions.items():
        assert result == main.evaluate(expr)
Example #12
def attack_PGM(model, test_data_loader, p=2, epsilon=0.01, alpha=0.1, random=False):
    model.eval()
    T_adv = 15
    loss_function = nn.CrossEntropyLoss()
    valid_data_x = torch.FloatTensor(len(test_data_loader.dataset),1,28,28)
    valid_data_y = torch.LongTensor(len(test_data_loader.dataset))
    count = 0
    
    for x_, y_ in test_data_loader:
        if USE_CUDA:
            x_, y_ = x_.cuda(), y_.cuda()
        input_var, target_var = Variable(x_, requires_grad=True), Variable(y_)

        if random:
            noise = torch.FloatTensor(x_.size()).uniform_(-epsilon, epsilon)
            if USE_CUDA:
                noise = noise.cuda()
            input_var.data += noise

        # generate the attack data by iterated gradient steps
        for n in range(1, T_adv + 1):
            step_alpha = float(alpha / np.sqrt(n))
            zero_gradients(input_var)
            output = model(input_var)
            loss = loss_function(output, target_var)
            loss.backward()
            x_grad = input_var.grad.data
            if p == 2:
                # L2-normalize the gradient per example, then scale by epsilon
                grad_ = x_grad.view(len(x_), -1)
                grad_ = grad_ / torch.norm(grad_, 2, 1).view(len(x_), 1).expand_as(grad_)
                normed_grad = epsilon * grad_.view_as(x_grad)
            else:
                # infinity-norm: keep only the gradient sign
                normed_grad = epsilon * torch.sign(x_grad)
            # x^(t+1) = x^(t) + alpha_t * delta_x^t, projected back onto the
            # epsilon-ball around x and the valid pixel range
            normed_grad.clamp_(-epsilon, epsilon)
            step_adv = input_var.data + step_alpha * normed_grad
            total_adv = step_adv - x_  # x^t - x
            total_adv.clamp_(-epsilon, epsilon)  # ||x^t - x|| <= epsilon
            input_adv = x_ + total_adv
            input_adv.clamp_(-1.0, 1.0)  # MNIST data lies in [-1, 1]
            input_var.data = input_adv

        valid_data_x[count:count+len(x_),:] = input_var.data.cpu()
        valid_data_y[count:count+len(x_)] = y_.clone().cpu()
        count += len(x_)
    dataset = torch.utils.data.TensorDataset(valid_data_x, valid_data_y)
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
    return main.evaluate(model, data_loader)
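A hedged usage sketch (model, test_loader, and the module-level USE_CUDA and BATCH_SIZE are assumed to exist; the function returns whatever main.evaluate reports, a percentage accuracy judging by Example #16):

# Hypothetical call sites; any p other than 2 selects the sign-gradient branch.
acc_l2 = attack_PGM(model, test_loader, p=2, epsilon=0.1)
acc_linf = attack_PGM(model, test_loader, p=float('inf'), epsilon=0.1, random=True)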
Example #13
def main(_):
    # set up logger
    tf.logging.set_verbosity(tf.logging.INFO)

    params = global_params

    # try loading parameters
    # priority: command line > saved checkpoint > default
    # 1. first pass over the command line, to locate output_dir and its saved parameters
    params.parse(flags.FLAGS.parameters)
    params = load_parameters(params, params.output_dir)
    # 2. refine with command line parameters
    params.parse(flags.FLAGS.parameters)

    # set up random seed
    random.seed(params.random_seed)
    np.random.seed(params.random_seed)
    tf.set_random_seed(params.random_seed)

    # loading vocabulary
    tf.logging.info("Begin Loading Vocabulary")
    start_time = time.time()
    params.src_vocab = Vocab(params.src_vocab_file)
    params.tgt_vocab = Vocab(params.tgt_vocab_file)
    tf.logging.info("End Loading Vocabulary, Source Vocab Size {}, "
                    "Target Vocab Size {}, within {} seconds".format(
                        params.src_vocab.size(), params.tgt_vocab.size(),
                        time.time() - start_time))

    mode = flags.FLAGS.mode
    if mode == "train":
        # save parameters
        save_parameters(params, params.output_dir)

        # load the recorder
        params = setup_recorder(params)

        graph.train(params)
    elif mode == "test":
        graph.evaluate(params)
    else:
        tf.logging.error("Invalid mode: {}".format(mode))
Example #14
def test_reid_evaluation():
    dataloader = dataloader_builder.build(reid_cfg)
    # restore
    model_cfgs = evaluation_model_builder.build(baseline_model_cfg)

    model = model_builder.build(model_cfgs[0])
    model = DataParallel(model)
    _run = {'config': {'device': torch.device('cuda')}}
    score = evaluate([dataloader], model, _run, "test")

    print(score)
Example #15
def main(args):
    """Evaluation corresponding to given argparse arguments."""
    # device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    agent, env, eval_env = configure(args)
    agent = agent.to(device)

    # load checkpoints if directory is not empty
    agent_file = join(args.logdir, 'best_agent.pt')
    R = -np.inf
    if exists(agent_file):
        state_dict = torch.load(agent_file)
        R = state_dict["return"]
        info(f"eval> Loading agent with return {R}...")
        agent.load_state_dict(state_dict)
    else:
        raise ValueError(f"{agent_file} does not exist, no agent available...")

    evaluate(args.dt, 0, eval_env, agent, args.time_limit,
             eval_return=True, video=True, progress_bar=True, no_log=True)
Example #16
def attack_WRM(model, test_data_loader, gamma, max_lr0, epsilon=0.01, random=False, get_err=False):
    model.eval()
    T_adv = 15
    loss_function = nn.CrossEntropyLoss()
    
    if get_err:
        valid_data_x = torch.FloatTensor(len(test_data_loader.dataset),1,28,28)
        valid_data_y = torch.LongTensor(len(test_data_loader.dataset))
    
    count = 0
    err = 0
    rhos = []
    for x_, y_ in test_data_loader:
        if USE_CUDA:
            x_, y_ = x_.cuda(), y_.cuda()
        x_, y_ = Variable(x_), Variable(y_)

        # initialize z_hat with x_
        z_hat = x_.data.clone()
        if USE_CUDA:
            z_hat = z_hat.cuda()
        if random:
            noise = torch.FloatTensor(x_.size()).uniform_(-epsilon, epsilon)
            if USE_CUDA:
                noise = noise.cuda()
            z_hat += noise

        z_hat = Variable(z_hat, requires_grad=True)
        # run the inner maximizer over z_hat
        optimizer_zt = torch.optim.Adam([z_hat], lr=max_lr0)
        loss_zt = 0  # phi(theta, z0)
        rho = 0  # E[c(Z, Z0)]
        for n in range(1, T_adv + 1):
            optimizer_zt.zero_grad()
            delta = z_hat - x_
            rho = torch.mean(torch.norm(delta.view(len(x_), -1), 2, 1) ** 2)
            loss_zt = -(loss_function(model(z_hat), y_) - gamma * rho)
            loss_zt.backward()
            optimizer_zt.step()
            main.adjust_lr_zt(optimizer_zt, max_lr0, n + 1)
            
        rhos.append(rho.data[0])
        
        if get_err:
            valid_data_x[count:count+len(x_),:] = z_hat.data.cpu().clone()
            valid_data_y[count:count+len(x_)] = y_.data.cpu().clone()
            count += len(x_)
    if get_err:
        dataset = torch.utils.data.TensorDataset(valid_data_x, valid_data_y)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
        err = 1.0 - main.evaluate(model, data_loader) / 100

    return torch.mean(torch.FloatTensor(rhos)), err
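Reading off loss_zt, each inner step is gradient ascent on the penalized robust objective

    max_z  phi(theta, z) - gamma * ||z - x||_2^2

so a larger gamma keeps the adversarial z_hat closer to the clean input, and the returned mean over rhos estimates the transport cost E[c(Z, Z0)] named in the comments.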
Example #17
def main(sys_args):
    parser = argparse.ArgumentParser(description='Testing helper for DeNSe parser.')

    parser.add_argument('--test_file', type=str, required=True,
                        help='test data')
    parser.add_argument('--output_file', type=str, default=None,
                        help='output file')
    parser.add_argument('--model_dir', type=str, default='model',
                        help='folder of the model')
    parser.add_argument('--data_model', type=str, default='data.pkl',
                        help='data model file')
    parser.add_argument('--classifier_model', type=str, default='model.pkl',
                        help='classifier model file')
    parser.add_argument('--best_model', type=str, default='best.pkl',
                        help='best classifier model file')

    parser.add_argument('--batch_size', type=int, default=50,
                        help='mini-batch size')

    args = parser.parse_args(sys_args)
    evaluate(args)
Example #18
def play_models_and_update(black, white, elos):
  output_dir = '/tmp/play_models2'
  os.makedirs(output_dir, exist_ok=True)
  white_win = main.evaluate(black, white, output_dir=output_dir, games=1, readouts=400)

  black_elo = elos[black]
  white_elo = elos[white]
  if white_win > 0:
    white_elo, black_elo = new_elos(white_elo, black_elo)
  else:
    black_elo, white_elo = new_elos(black_elo, white_elo)
  elos[black] = black_elo
  elos[white] = white_elo
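new_elos is not shown in the snippet; a plausible sketch, assuming a standard Elo update with K = 32 and the winner passed first (matching both call sites above):

def new_elos(winner_elo, loser_elo, k=32):
  # expected score of the winner under the logistic Elo model
  expected = 1.0 / (1 + 10 ** ((loser_elo - winner_elo) / 400))
  return winner_elo + k * (1 - expected), loser_elo - k * (1 - expected)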
Example #19
def test():
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    saver.restore(sess=sess, save_path=save_path)

    word2id = read_vocab(vocab_dir)
    query_test, doc_test = process_file(test_query_dir, test_docs_dir, word2id)

    print('Testing...')
    loss_test, p_1_test, mrr_test = evaluate(sess, query_test, doc_test)
    msg = 'Test Loss: {0:>6.2}, Test P@1: {1:>7.2%}, Test MRR: {2:8.2%}'
    print(msg.format(loss_test, p_1_test, mrr_test))
Example #20
def evaluate_population(population):
    for count, chromosome in enumerate(population, start=1):
        logger.info('Evaluating network %d of %d' % (count, len(population)))
        parameter = chromosome['params']

        # chromosome['fitness'] = 1 / parameter['learning_rate'] * parameter['batch_size']
        # chromosome['fitness'] *= parameter['l2_reg_scale'] * parameter['dropout_rate']
        # chromosome['fitness'] *= parameter['shuffle'] * 100
        # chromosome['fitness'] *= parameter['hidden_units'][0]

        try:
            error = g_research.evaluate(max_steps=1.2e5, **parameter)
            chromosome['fitness'] = 1.0 / error
        except Exception:
            # a bare except would also swallow KeyboardInterrupt
            logger.exception('Error while evaluating {}'.format(parameter))
            chromosome['fitness'] = 0.0
Example #22
 def test_evaluate(self):
     self.assertEqual(
         main.evaluate({
             'type': '/',
             'left': {
                 'type': '+',
                 'left': {
                     'type': 'number',
                     'value': '1'
                 },
                 'right': {
                     'type': 'number',
                     'value': '2'
                 },
             },
             'right': {
                 'type': 'number',
                 'value': '3'
             },
         }), 1)
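A minimal evaluator consistent with this node format (hypothetical — the real main.evaluate may differ, e.g. in how '/' rounds) would be:

def evaluate(node):
    # leaves carry their value as a string
    if node['type'] == 'number':
        return int(node['value'])
    # inner nodes name the operator in 'type'
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: a / b}
    return ops[node['type']](evaluate(node['left']), evaluate(node['right']))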
Example #23
def forex_eval(params):
    print(f'Using params: {params}')
    now_str = str(datetime.now()).replace(':', '-').replace(' ', '_').split('.')[0]
    writer = SummaryWriter(os.path.join(log_dir, now_str))
    config = helpers.get_config(config_path)
    if 'n_dense_layers' in params and 'n_nodes_dense_layers' in params:
        config['dense_params'] = [{
            'out_features': params['n_nodes_dense_layers']
        } for _ in range(params['n_dense_layers'])]
    if 'n_conv_layers' in params and 'conv_filter_size' in params and 'conv_kernel_size' in params:
        config['conv_params'] = [{
            'out_channels': params['conv_filter_size'],
            'kernel_size': params['conv_kernel_size']
        } for _ in range(params['n_conv_layers'])]
    for param, value in params.items():
        if param in config:
            config[param] = value
    model = train(device, writer, config, data_dir)
    metrics = evaluate(model, device, writer, config, data_dir, config_path)
    save(model, device, metrics, now_str, config, model_dir, config_path)
    return {k: (v, 0.0) for k, v in metrics.items() if not isinstance(v, tuple)}
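A hedged usage sketch; the key names are taken from the lookups above, the values are illustrative only:

# Hypothetical params dict: the five keys below drive the two branches above,
# and any key that also appears in the loaded config is copied over verbatim.
metrics = forex_eval({
    'n_dense_layers': 2, 'n_nodes_dense_layers': 64,
    'n_conv_layers': 3, 'conv_filter_size': 32, 'conv_kernel_size': 5,
})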
Example #24
def create_visuals(model_path):
    from config import args

    args['malnet_tiny'] = False
    create_image_symlinks(args)
    train_gen, val_gen, test_gen = get_generators(args)

    args['y_train'] = train_gen.labels
    args['class_indexes'] = list(val_gen.class_indices.values())
    args['class_labels'] = list(val_gen.class_indices.keys())
    args['num_classes'] = len(val_gen.class_indices.keys())

    os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
        str(d) for d in args['devices'])

    model = tf.keras.models.load_model(model_path, compile=False)

    loss, _ = get_loss(args)
    model.compile(loss=loss, optimizer='adam', metrics=[])

    y_pred, y_scores = evaluate(test_gen, model)
    y_true = test_gen.labels.tolist()

    images_to_view = []
    counts = defaultdict(int)

    for idx, label in enumerate(y_true):
        if counts[label] < 20:
            images_to_view.append(idx)
            counts[label] += 1

    for idx in images_to_view:
        if y_pred[idx] == y_true[idx]:
            path = test_gen.filepaths[idx]
            run_gradcam2(path, model)
Example #25
 def test_evaluate(self):
     # renamed from `list` to avoid shadowing the built-in
     sentences = ['aber das nächste mal doch bitte im querformat']
     result = mn.evaluate(sentences)
     self.assertEqual(result, 0)
Example #26
                          vtype=grb.GRB.CONTINUOUS,
                          name="movies")

    # rewrite the objective using auxiliary variables, since Gurobi does not support products of more than two variables
    objective = 0
    for i, j in zip(rows, cols):
        z = opt_model.addVar(name=f'z_{i}_{j}')
        opt_model.addConstr(u[i] * v[j] == z)
        objective += (z - trainX[i, j]) * (z - trainX[i, j])

    start = time.time()
    # OPTIMIZE
    opt_model.setObjective(objective, grb.GRB.MINIMIZE)
    opt_model.optimize()
    elapsed = time.time() - start
    print(f'Optimization took {elapsed}s')
    # get u and v vectors
    u_arr = np.array([u[i].X for i in range(dataset.n_users)], dtype=np.float64)
    v_arr = np.array([v[i].X for i in range(dataset.n_movies)], dtype=np.float64)
    print('shapes', u_arr.shape, v_arr.shape)
    # store latest feature vectors
    np.save('./data/gurobi2_U.npy', u_arr)
    np.save('./data/gurobi2_V.npy', v_arr)
    # keep evaluation schema fixed wrt other experiments
    train_mse = evaluate(u_arr, v_arr, trainX, 'sparse')
    # mse = evaluate(u_arr, v_arr, testX, 'sparse')

    als = ALSSparse(u=u_arr, v=v_arr, dataset=trainX)
    fun_eval = als.function_eval()
    print("Final function evaluation", fun_eval)
Example #27
def main(_):
    # set up logger
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.logging.info("Welcome Using Zero :)")

    params = global_params

    # try loading parameters
    # priority: command line > saved checkpoint > default
    # 1. first pass over the config file, to locate output_dir and its saved parameters
    if os.path.exists(flags.FLAGS.config):
        params.override_from_dict(eval(open(flags.FLAGS.config).read()))
    params = load_parameters(params, params.output_dir)
    # 2. refine with command line parameters
    if os.path.exists(flags.FLAGS.config):
        params.override_from_dict(eval(open(flags.FLAGS.config).read()))
    params.parse(flags.FLAGS.parameters)

    # set up random seed
    random.seed(params.random_seed)
    np.random.seed(params.random_seed)
    tf.set_random_seed(params.random_seed)

    # loading vocabulary
    tf.logging.info("Begin Loading Vocabulary")
    start_time = time.time()
    full_task = get_task(params, True)
    if not os.path.exists(params.word_vocab_file):
        params.word_vocab = Vocab(lower=params.lower)
        params.word_vocab.make_vocab(
            full_task,
            use_char=False,
            embedding_path=params.pretrain_word_embedding_file)
    else:
        params.word_vocab = Vocab(lower=params.lower,
                                  vocab_file=params.word_vocab_file)
    if params.use_char:
        if not os.path.exists(params.char_vocab_file):
            params.char_vocab = Vocab(lower=False)
            params.char_vocab.make_vocab(full_task,
                                         use_char=True,
                                         embedding_path=None)
        else:
            params.char_vocab = Vocab(lower=False,
                                      vocab_file=params.char_vocab_file)

    tf.logging.info("End Loading Vocabulary, Word Vocab Size {}, "
                    "Char Vocab Size {}, within {} seconds".format(
                        params.word_vocab.size(),
                        params.char_vocab.size() if params.use_char else 0,
                        time.time() - start_time))

    if flags.FLAGS.mode == "vocab":
        save_parameters(params, params.output_dir)
        return

    # save parameters
    if flags.FLAGS.mode == "train":
        save_parameters(params, params.output_dir)

    # loading bert config
    if params.enable_bert:
        bert_config = bert.load_config(params.bert_dir)
        params.bert = tc.training.HParams(**bert_config)

        # loading vocabulary
        tf.logging.info("Begin Loading Vocabulary")
        start_time = time.time()
        params.bert.vocab = bert.load_vocab(params.bert_dir)
        tf.logging.info(
            "End Loading Vocabulary, Vocab Size {}, within {} seconds".format(
                params.bert.vocab.size,
                time.time() - start_time))

    # loading task label information
    params.label_size = full_task.get_label_size()

    # print parameters
    print_parameters(params)

    # print the used datasets
    tf.logging.info("Task {} is performed with data {}".format(
        params.task, full_task.data_path))

    mode = flags.FLAGS.mode
    if mode == "train":
        # load the recorder
        params = setup_recorder(params)

        graph.train(params)
    elif mode == "test":
        graph.evaluate(params)
    else:
        tf.logging.error("Invalid mode: {}".format(mode))
Example #28
def test_evaluate_with_wrong_text():
    with pytest.raises(SyntaxError):
        main.evaluate('')
Example #29
def main():
    population = load_best_population()
    individual = population[0]
    evaluate(individual=individual, render=True)
Example #30
def test_evaluate_multiple_evaluations():
    assert 42 == main.evaluate(
        main.evaluate('5² – 4 x 2 + 3'), '+', main.evaluate('10 + 2² x 3'))
Example #31
def test_evaluate_with_wrong_parentheses():
    with pytest.raises(SyntaxError):
        main.evaluate('(1+1')
Example #32
 def test3(self):
   self.assertEqual(evaluate("5"), 5)
Example #33
 def test4(self):
   self.assertEqual(evaluate("5+4-3"), 6)
Example #34
 def test4(self):
   self.assertEqual(evaluate("5-(4-3)"), 4)
Example #35
 def test1(self):
   self.assertEqual(evaluate("5+4"), 9)
Example #36
 def test2(self):
   self.assertEqual(evaluate("5+4+3"), 12)