Code Example #1
 def __init__(self,
              parameters: List[Tensor] = None,
              learning_rate: float = 0.01,
              beta: float = 0.9,
              bias_correction=False):
     Optimizer.__init__(self, parameters)
     self.learning_rate = learning_rate
     self.beta = beta
     self.bias_correction = bias_correction
     self.t = 0
     self.m = zeros_like_list(self.parameters)
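The state initialized above (a step counter t and one moving average m per parameter) is what a bias-corrected momentum update needs. A minimal NumPy sketch of one such step, assuming plain arrays rather than this project's Tensor type:

import numpy as np

def momentum_step(param, grad, m, t, learning_rate=0.01, beta=0.9,
                  bias_correction=False):
    # Exponentially weighted moving average of the gradient.
    m = beta * m + (1 - beta) * grad
    # Optional correction for the zero-initialized average (t starts at 1).
    m_hat = m / (1 - beta ** t) if bias_correction else m
    return param - learning_rate * m_hat, m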
Code Example #2
def learn(in_data: ndarray, out_data: ndarray, test_in_data: ndarray,
          test_out_data: ndarray, model_func: Callable[[Tensor, Tensor, bool], Tensor],
          loss_func: Callable[[Tensor, Tensor, bool], Tensor], optimizer: Optimizer,
          score_func: Callable[[Tensor, ndarray], float] = None,
          batch_size: int = 100, epoch_number: int = 100):
    input_data = in_data.astype(np.float32)
    output_data = out_data.astype(np.float32)
    test_input_data = test_in_data.astype(np.float32)
    test_output_data = test_out_data.astype(np.float32)
    train_loss_values = []
    test_loss_values = []
    test_score_values = []
    start = time()
    for i in range(epoch_number):
        bit = BatchIterator(input_data, output_data, batch_size)
        iter_loss = 0
        for b_in, b_out in bit:
            x = Tensor(b_in)
            y = Tensor(b_out)
            model = model_func(x, y, True)
            loss = loss_func(y, model, True)
            iter_loss += loss.data[0] / input_data.shape[0]
            optimizer.step(loss)
        if score_func is not None:
            test_loss, err_ratio = score_test(test_input_data, test_output_data, model_func, loss_func, score_func)
        else:
            err_ratio = 'N/A'
            test_loss = 'N/A'
        train_loss_values.append(iter_loss)
        test_loss_values.append(test_loss)
        test_score_values.append(err_ratio)
        print("Iteration {0} train-loss: {1}, test-loss: {2}, score: {3}%".format(i, iter_loss, test_loss, err_ratio))
    end = time()
    print("Execution time: {0}s".format(end - start))
    return train_loss_values, test_loss_values, test_score_values
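A hedged usage sketch of learn; model_fn, loss_fn, and SGDOptimizer are stand-ins for callables and an optimizer matching the signatures above, not names confirmed by this snippet:

import numpy as np

x_train, y_train = np.random.rand(1000, 20), np.random.rand(1000, 1)
x_test, y_test = np.random.rand(100, 20), np.random.rand(100, 1)

train_losses, test_losses, scores = learn(
    x_train, y_train, x_test, y_test,
    model_func=model_fn, loss_func=loss_fn,
    optimizer=SGDOptimizer(learning_rate=0.01),
    batch_size=100, epoch_number=10)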
Code Example #3
    def __init__(self, input_dims, output_dims, hidden_size1, hidden_size2,
                 std, output_lower_bound, output_upper_bound, sigma, alpha,
                 pop, env, discrete, optimizer):

        self.input_dims = input_dims
        self.output_dims = output_dims
        self.output_lower_bound = output_lower_bound
        self.output_upper_bound = output_upper_bound
        self.sigma = sigma
        self.alpha = alpha
        self.pop = pop
        self.env = env
        self.discrete = discrete
        self.params = {
            'w1': np.random.normal(0, std, [input_dims, hidden_size1]),
            'b1': np.zeros([hidden_size1, 1]),
            'w2': np.random.normal(0, std, [hidden_size1, hidden_size2]),
            'b2': np.zeros([hidden_size2, 1]),
            'w3': np.random.normal(0, std, [hidden_size2, output_dims]),
            'b3': np.zeros([output_dims, 1])
        }

        self.optimizers = dict()
        for key in self.params.keys():
            if optimizer == 'adam':
                self.optimizers[key] = Optimizer(self.params[key], adam=True)
            elif optimizer == 'momentum':
                self.optimizers[key] = Optimizer(self.params[key], adam=False)
Code Example #4
 def optimize(self, layer_i: int, optimizer: Optimizer):
     # Compute the ids of the weight and bias within the optimizer, then hand them to the optimizer for the update.
     self.linear.weight = optimizer.update(
         layer_i * Layer._max_params_n + 0,
         self.linear.weight, self.linear.grad_weight)
     self.linear.bias = optimizer.update(
         layer_i * Layer._max_params_n + 1,
         self.linear.bias, self.linear.grad_bias)
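The first argument to update is a stable integer slot (layer index times Layer._max_params_n, plus 0 for the weight and 1 for the bias), which lets the optimizer keep per-parameter state. A hypothetical sketch of an optimizer that uses the id this way (not this project's implementation):

import numpy as np

class MomentumById:
    """Hypothetical optimizer keyed by integer parameter ids."""

    def __init__(self, learning_rate=0.01, beta=0.9):
        self.learning_rate = learning_rate
        self.beta = beta
        self.velocity = {}  # slot id -> momentum buffer

    def update(self, param_id, param, grad):
        v = self.velocity.get(param_id, np.zeros_like(param))
        v = self.beta * v - self.learning_rate * grad
        self.velocity[param_id] = v
        return param + v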
Code Example #5
 def __init__(self,
              parameters: List[np.ndarray] = None,
              learning_rate: float = 0.01,
              beta: float = 0.9,
              epsilon: float = 1e-08,
              bias_correction=False):
     Optimizer.__init__(self, parameters)
     self.learning_rate = learning_rate
     self.beta = beta
     self.epsilon = epsilon
     self.bias_correction = bias_correction
     self.t = 0
     self.v = zeros_like_list(self.parameters)
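With a squared-gradient average v, a beta, and an epsilon, this is RMSProp-style state. A minimal NumPy sketch of the corresponding update, under the assumption that the step divides by the root of v (not confirmed by this snippet):

import numpy as np

def rmsprop_step(param, grad, v, t, learning_rate=0.01, beta=0.9,
                 epsilon=1e-08, bias_correction=False):
    # Exponentially weighted moving average of the squared gradient.
    v = beta * v + (1 - beta) * grad ** 2
    v_hat = v / (1 - beta ** t) if bias_correction else v
    return param - learning_rate * grad / (np.sqrt(v_hat) + epsilon), v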
Code Example #6
File: adam_optimizer.py Project: argocan/plautodiff
 def __init__(self,
              parameters: List[Tensor] = None,
              learning_rate: float = 0.001,
              beta_1: float = 0.9,
              beta_2: float = 0.999,
              epsilon: float = 1e-08,
              bias_correction=True):
     Optimizer.__init__(self, parameters)
     self.learning_rate = learning_rate
     self.beta_1 = beta_1
     self.beta_2 = beta_2
     self.epsilon = epsilon
     self.bias_correction = bias_correction
     self.t = 0
     self.m = zeros_like_list(self.parameters)
     self.v = zeros_like_list(self.parameters)
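The fields here (step counter t, first moment m, second moment v, two betas, epsilon) are the standard Adam state. For reference, a self-contained NumPy sketch of one Adam step with bias correction; this is the textbook rule, not necessarily this project's exact implementation:

import numpy as np

def adam_step(param, grad, m, v, t, learning_rate=0.001, beta_1=0.9,
              beta_2=0.999, epsilon=1e-08, bias_correction=True):
    # Update the biased first and second moment estimates.
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    if bias_correction:
        m_hat = m / (1 - beta_1 ** t)  # t starts at 1
        v_hat = v / (1 - beta_2 ** t)
    else:
        m_hat, v_hat = m, v
    return param - learning_rate * m_hat / (np.sqrt(v_hat) + epsilon), m, v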
Code Example #7
    def compile_model(
            self, lr=0.01, mr=0.001,
            opt="sgd", loss="mse", metrics=['mse']):

        if opt not in self.av_optimizers:
            raise ValueError(
                f"Optimizer is not understood, use one of {self.av_optimizers}.")

        for m in metrics:
            if m not in self.av_metrics:
                raise ValueError(
                    f"Metric '{m}' is not understood, use one of {self.av_metrics}.")

        if loss not in self.av_loss:
            raise ValueError(
                f"Loss function is not understood, use one of {self.av_loss}.")

        self.loss = loss
        self.lr = lr
        self.mr = mr
        self.metrics = metrics
        self.iscompiled = True
        self.optimizer = Optimizer(
            layers=self.layers, name=opt, learning_rate=lr, mr=mr)
        self.optimizer = self.optimizer.opt_dict[opt]
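A hedged usage sketch; model is assumed to be an instance of the class defining compile_model, with av_optimizers, av_metrics, and av_loss already populated:

model.compile_model(lr=0.01, mr=0.001, opt="sgd", loss="mse", metrics=["mse"])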
Code Example #8
    def compile(self,
                initializer='random',
                loss='mean_squared_error',
                optimizer=Optimizer()):

        self.parameters = initialize(self.layers, initializer)
        self.loss = loss
        self.optimizer = optimizer
Code Example #9
File: train.py Project: adithyarajk/MyNet
def train(
    net: NeuralNet,
    inputs: Tensor,
    targets: Tensor,
    num_epochs: int = 5000,
    iterator: DataIterator = BatchIterator(),
    loss=MSE(),
    optimizer: Optimizer = SGD()
) -> None:

    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
        print(epoch, epoch_loss)
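A hedged usage sketch of train on XOR; the module paths and the Linear/Tanh layer names are assumptions modeled on similar minimal frameworks, not confirmed by this snippet:

import numpy as np
from mynet.nn import NeuralNet         # hypothetical module path
from mynet.layers import Linear, Tanh  # hypothetical module path

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([Linear(input_size=2, output_size=2),
                 Tanh(),
                 Linear(input_size=2, output_size=2)])
train(net, inputs, targets, num_epochs=5000)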
Code Example #10
def main():
    optimizer_list = [
        Optimizer(name) for name in ["Adam", "Adagrad", "RMSprop", "SGD"]
    ]

    # use last location to draw a line to the current location
    for i in range(1000):
        for model in optimizer_list:
            model.train_step()

        if i % 10 == 0:
            ax = init_plot()
            for model in optimizer_list:
                model.plot(ax)
            plt.legend()
            plt.show()
Code Example #11
File: adam_opimizer.py Project: dohnala/GridWorld
 def create(self, parameters):
     return Optimizer(optim.Adam(parameters, lr=self.learning_rate),
                      parameters)
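A hypothetical sketch of what such a wrapper could look like; it only illustrates the pattern of pairing a torch optimizer with the parameters it manages, and is not this project's actual class:

class Optimizer:
    """Pairs a torch optimizer with its parameters (hypothetical)."""

    def __init__(self, torch_optimizer, parameters):
        self.torch_optimizer = torch_optimizer
        self.parameters = parameters

    def step(self):
        # Apply the accumulated gradients, then clear them.
        self.torch_optimizer.step()
        self.torch_optimizer.zero_grad()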
Code Example #12
if checkpoint is not None:
    logger.info('Starting from checkpoint')
    with open(checkpoint, 'rb') as f:
        layer_params = pickle.load(f)
    # Restore the network parameters

    # Restoring optimizer updates
    # Discriminator updates
    lasagne.layers.set_all_param_values(network, layer_params, trainable=True)
# Generator updates

logging.info('Compiling updates ...')
updates = Optimizer(clip=5.0).rmsprop(
    cost=cost,
    params=params,
)

logging.info('Compiling train function ...')
f_train = theano.function(
    inputs=[x, y, mask],
    outputs=cost,
    updates=updates,
    on_unused_input='warn',
)

logging.info('Compiling eval function ...')
f_eval = theano.function(
    inputs=[x],
    outputs=final_output,
    on_unused_input='warn',
)
Code Example #13
def train(config):
    print('get the training data')
    #logging.info('get the training data')
    train_data_loader = prepareData(config)
    print('The training data is %d batches with %d batch sizes' %
          (len(train_data_loader), config['batch_size']))
    #logging.info('The training data is %d batches with %d batch sizes'%(len(train_data_loader), config['batch_size']))
    print('------------------------------------------')
    print('get the config information')
    print(config)
    #logging.info(config)
    print('------------------------------------------')
    print('get the valid data')
    config['train_file'] = config['valid_file']
    config['shuffle'] = True
    test_data_loader = prepareData(config)
    print('The valid data is %d batches with %d batch sizes' %
          (len(test_data_loader), config['batch_size']))
    print('------------------------------------------')
    print('Train step! The model runs on ' + str(device))
    #logging.info('Train step! The model runs on ' + str(device))
    if config['update_model']:
        model = TextSlfAttnNet(config).to(device)
        load_model_name = config['update_model']
        print('load update model name is :' + load_model_name)
        checkpoint = torch.load(load_model_name)
        model.load_state_dict(checkpoint['net'])
    else:
        model = TextSlfAttnNet(config).to(device)
    if config['warm_up']:
        optimizer = Optimizer('adam',
                              config['learning_rate'],
                              0,
                              lr_decay=0.5,
                              beta1=0.9,
                              beta2=0.98,
                              decay_method='noam',
                              start_decay_steps=None,
                              decay_steps=None,
                              warmup_steps=16000,
                              model_size=200,
                              warmup_start_lr=1e-07,
                              optims='fairseq')
        optimizer.set_parameters(model)
    else:
        optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=10,
                                              gamma=0.1)
    total_params = sum(p.numel() for p in model.parameters())
    print("total params", total_params)
    loss_list = dict()
    best_f1 = 0.0
    total_time = 0.0
    batch_step = 0
    for epoch in range(config['epochs']):
        start = time.time()
        model.train()
        model.zero_grad()
        total_loss = 0
        batch_loss = 0
        current_batch = 0
        for batch, data in enumerate(train_data_loader):
            model.train()
            batch_step += 1
            inputs, labels, bi_inputs, pos_id, seq_len = data
            #if not bigram, the bi_inputs is None
            loss = model.calculate_loss(inputs.to(device),
                                        bi_inputs.to(device),
                                        labels.to(device), pos_id.to(device),
                                        seq_len)
            total_loss += float(loss)
            batch_loss += float(loss)
            current_batch += 1
            loss.backward()
            optimizer.step()
            model.zero_grad()

            if batch_step % 100 == 0:
                #valid process
                model.eval()
                with torch.no_grad():
                    result_matrix_list = []
                    gold_matrix_list = []
                    for _, data in enumerate(test_data_loader):
                        inputs, labels, bi_inputs, pos_id, seq_len = data
                        tag_space = model(inputs.to(device),
                                          bi_inputs.to(device),
                                          pos_id.to(device), seq_len)
                        result_matrix = tag_space.tolist()
                        result_matrix = [
                            result_matrix[i][:eof]
                            for i, eof in enumerate(seq_len)
                        ]
                        labels = labels.tolist()
                        labels = [
                            labels[i][:eof] for i, eof in enumerate(seq_len)
                        ]
                        result_matrix_list += result_matrix
                        gold_matrix_list += labels
                    P, R, F = score.score(result_matrix_list, gold_matrix_list)
                    #P, R, F = score(result_matrix_list, gold_matrix_list, config['dataset'])
                    #acc = score.accuracy(result_matrix_list, gold_matrix_list)
                    batch_loss_avg = batch_loss / current_batch
                    batch_loss = 0
                    current_batch = 0
                    #logging.info('epoch:'+str(epoch+1)+'||global_step:'+str(batch_step)+'||loss:'+
                    #                str(batch_loss_avg)+'||f:'+str(F))

            print("\rEpoch: %d ! the process is in %d of %d ! " %
                  (epoch + 1, batch + 1, len(train_data_loader)),
                  end='')
        if not config['warm_up']:
            scheduler.step()
        end = time.time()
        loss_avg = total_loss / len(train_data_loader)  # average over this epoch's batches, not the global step count
        loss_list[epoch] = loss_avg
        print("The loss is %f ! " % (loss_avg))
        print("The time is %f ! " % (end - start))
        total_time += (end - start)

        #valid process
        model.eval()
        start = time.time()
        with torch.no_grad():
            result_matrix_list = []
            gold_matrix_list = []
            for _, data in enumerate(test_data_loader):
                inputs, labels, bi_inputs, pos_id, seq_len = data
                tag_space = model(inputs.to(device), bi_inputs.to(device),
                                  pos_id.to(device), seq_len)
                result_matrix = tag_space.tolist()
                result_matrix = [
                    result_matrix[i][:eof] for i, eof in enumerate(seq_len)
                ]
                labels = labels.tolist()
                labels = [labels[i][:eof] for i, eof in enumerate(seq_len)]
                result_matrix_list += result_matrix
                gold_matrix_list += labels
            end = time.time()
            P, R, F = score.score(result_matrix_list, gold_matrix_list)
            #P, R, F = score(result_matrix_list, gold_matrix_list, config['dataset'])
            #acc = score.accuracy(result_matrix_list, gold_matrix_list)
            print('score| P:%.2f R:%.2f F:%.2f' % (P, R, F))
            total_time += (end - start)
            sum_time = float(end - start)
            per_time = sum_time / (config['batch_size'] *
                                   len(test_data_loader))
            print('the time is %f, process %f time in per sentence' %
                  (sum_time, per_time))
            if F > best_f1:
                best_f1 = F
                if config['warm_up']:
                    state = {
                        'net': model.state_dict(),
                        'optimizer': optimizer.optimizer.state_dict(),
                        'epoch': epoch
                    }
                else:
                    state = {
                        'net': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'epoch': epoch
                    }
                model_name = os.path.join(config['model_path'],
                                          config['model_name'])
                torch.save(state, model_name)
                print('\n the epoch %d is saved successfully, named %s !' %
                      (epoch + 1, model_name))

    #logging.info('epoch:'+str(epoch+1)+'||global_step:'+str(batch_step)+'||loss:'+str(batch_loss_avg)+'||f:'+str(F))
    print('Model training time is: %f' % total_time)
Code Example #14
"""
Implementation of Asynchronous Advantage Actor-Critic (A3C) control for the CartPole environment.
"""

from agent import Agent
from optimizers import Optimizer
import time

NUMBER_OF_AGENTS = 8
NUMBER_OF_OPTIMIZERS = 2
RUN_TIME_IN_MINUTES = 5

agents = [Agent() for _ in range(NUMBER_OF_AGENTS)]
optimizers = [Optimizer() for _ in range(NUMBER_OF_OPTIMIZERS)]

agents[0].render = True
for agent in agents:
    agent.start()
for optimizer in optimizers:
    optimizer.start()

try:
    time.sleep(RUN_TIME_IN_MINUTES * 60)
except KeyboardInterrupt:
    for agent in agents:
        agent.stop()
    for agent in agents:
        agent.join()  # Let the agents finish their episode
    for optimizer in optimizers:
        optimizer.stop()
    for optimizer in optimizers:
        optimizer.join()  # Let the optimizers finish their current step
Code Example #15
 def add_parameters(self, new_parameters):
     Optimizer.add_parameters(self, new_parameters)
     self.m += zeros_like_list(new_parameters)
Code Example #16
    def create(self, parameters):
        if self.optimizer is None:
            self.optimizer = SharedAdam(parameters, lr=self.learning_rate)
            self.optimizer.share_memory()

        return Optimizer(self.optimizer, parameters)
Code Example #17
logging.info('cost : %.3f' % (cost.eval({
    tgt_inp: tgt_inp_t,
    src_inp: src_inp_t,
    tgt_op: tgt_op_t,
    src_lens: src_lens_t,
    tgt_mask: tgt_mask_t
})))

if args.load_model != 'none':
    logging.info('Loading saved model ...')
    src_word2ind, src_ind2word, tgt_word2ind, tgt_ind2word \
        = load_model(args.load_model, params)

logging.info('Compiling theano functions ...')

updates = Optimizer(clip=2.0).adam(cost=cost, params=params, lr=0.0002)

f_train = theano.function(
    inputs=[src_inp, tgt_inp, tgt_op, src_lens, tgt_mask],
    outputs=cost,
    updates=updates)

f_eval = theano.function(
    inputs=[src_inp, tgt_inp, src_lens],
    outputs=final_output,
)

num_epochs = 100
logging.info('Training network ...')
BEST_BLEU = 1.0
costs = []
Code Example #18
    def fit(self, input, target, *, epochs=20, lr=None, bs=None,
            optimizer: Union[str, Optimizer] = 'sgd', loss: Union[str, Loss] = 'l2', 
            val_data: Optional[list] = None, callbacks: list = ()) -> dict:
        """Given the input data, train the parameters to fit the target data.
        
        Args:
            input: an array of input data - if 1D, then each point is a number;
                if 2D, then each point is a row vector in the array
            target: an array of target or label data - if 1D, then each point is a number;
                if 2D, then each point is a row vector in the array
            epochs: number of epochs to train  
            lr: learning rate, use lr of the optimizer by default
            bs: batch size, use bs of BatchLoader by default
            optimizer (Optimizer): optimizer of the parameters  
            loss: the metric to measure the training loss (does not affect backprop!)
            val_data: validation data in the form of (x_val, t_val)  
            callbacks (list of function): functions to be called at the end of each epoch,
                each function taking the NN object as input
                
        Returns:
            A dict of training history including loss etc.
        """
        input, target = reshape2D(input), reshape2D(target)
        
        optimizer = Optimizer.get(optimizer, lr)
        loss_func = Loss.get(loss)
        
        batches = BatchLoader(input, target, batch_size=bs)
        history = {'loss': [], 'val_loss': []}

        print('\nStart training', self)
        print('Input shape:', input.shape)
        print('Target shape:', target.shape)
        print('Total epochs:', epochs)
        print('Batch size:', batches.batch_size)
        print('Optimizer:', optimizer)

        for epoch in range(epochs):
            print('\nEpoch:', epoch + 1)
            
            loss = 0
            for xb, tb in pbar(batches):
                yb = self.forward(xb)               # forward pass the input
                loss += loss_func(yb, tb)           # accumulate the loss of the output
                eb = loss_func.backward(yb, tb)     # the error in the output layer
                self.backward(eb)                   # backprop the error
                optimizer.update(self.parameters)   # update parameters

            history['loss'].append(loss / len(target))
            
            if val_data:
                x_val, t_val = val_data
                y_val = self(x_val)
                history['val_loss'].append(loss_func(y_val, t_val))

            print('\t' + ', '.join('%s = %.2f' % (k, v[-1])
                                   for k, v in history.items() if v))

            for callback in callbacks:
                callback(self)

        return history
Code Example #19
File: sgd_optimizer.py Project: argocan/plautodiff
 def __init__(self,
              parameters: List[Tensor] = None,
              learning_rate: float = 0.01):
     Optimizer.__init__(self, parameters)
     self.learning_rate = learning_rate
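For completeness, the update this state supports is plain gradient descent; a one-line NumPy sketch, assuming arrays rather than this project's Tensor type:

import numpy as np

def sgd_step(param: np.ndarray, grad: np.ndarray,
             learning_rate: float = 0.01) -> np.ndarray:
    # Vanilla SGD: move against the gradient.
    return param - learning_rate * grad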
Code Example #20
logging.info('cost : %.3f' % (cost.eval({
    src_inp: src_inp_t,
    src_lens: src_lens_t,
    tgt_mask: tgt_mask_t
})))

if args.load_model != 'none':
    logging.info('Loading saved model ...')
    src_word2ind, src_ind2word, tgt_word2ind, tgt_ind2word \
        = load_model(args.load_model, params, autoencoder=True)

logging.info('Compiling theano functions ...')

updates = Optimizer(clip=5.0).adam(
    cost=cost,
    params=params,
)

f_train = theano.function(inputs=[src_inp, src_lens, tgt_mask],
                          outputs=cost,
                          updates=updates)

f_eval = theano.function(
    inputs=[src_inp, src_lens],
    outputs=final_output,
)

num_epochs = 100
logging.info('Training network ...')
BEST_BLEU = 1.0
costs = []
Code Example #21
    def fit(self,
            train_images,
            train_labels,
            test_images,
            test_labels,
            optimization_method=OptimizationMethod.Adam,
            optimization_params={
                'learning_rate': 0.01,
                'momentum': 0.9,
                'beta1': 0.9,
                'beta2': 0.999
            },
            iters_num=10000,
            mini_batch_size=100,
            samples_num_evaluated_per_epoc=100,
            is_activation_check=False):
        """Fits weight paramters by using optimization algorithm.
        """

        costs = []
        train_accuracies = []
        test_accuracies = []

        ITERS_NUM = iters_num
        MINI_BATCH_SIZE = mini_batch_size
        TRAIN_IMAGES_NUM = train_images.shape[0]
        ITER_PER_EPOC = max(TRAIN_IMAGES_NUM // MINI_BATCH_SIZE, 1)  # integer division so the modulo check below works

        optimizer = Optimizer().optimizer(
            optimization_method=optimization_method,
            learning_rate=optimization_params.get('learning_rate'),
            momentum=optimization_params.get('momentum'),
            beta1=optimization_params.get('beta1'),
            beta2=optimization_params.get('beta2'),
        )

        for i in range(ITERS_NUM):
            batch_mask = np.random.choice(TRAIN_IMAGES_NUM, MINI_BATCH_SIZE)
            x_batch = train_images[batch_mask]
            t_batch = train_labels[batch_mask]

            grads = nn.computeGradientWithBackPropagation(
                x_batch, t_batch, is_activation_check=is_activation_check)
            optimizer.update(nn.params, grads)

            costs.append(nn.computeCost(nn.forward(x_batch), t_batch))
            print('cost {}'.format(costs[-1]))

            # check accuracy
            if i % ITER_PER_EPOC == 0:
                print('=========ITERATION {}=========='.format(i))
                if samples_num_evaluated_per_epoc is None:
                    samples_num_evaluated_per_epoc = -1
                train_accuracies.append(
                    nn.computeAccuracy(
                        train_images[:samples_num_evaluated_per_epoc],
                        train_labels[:samples_num_evaluated_per_epoc]))
                test_accuracies.append(
                    nn.computeAccuracy(
                        test_images[:samples_num_evaluated_per_epoc],
                        test_labels[:samples_num_evaluated_per_epoc]))
                print("train accuracy {}, test accuracy {}".format(
                    train_accuracies[-1], test_accuracies[-1]))

        return costs, train_accuracies, test_accuracies
Code Example #22
File: nmt.py Project: rooa/sp2016.11-731
def main():
    src_word2idx, src_idx2word = create_word_table(train_src)
    tgt_word2idx, tgt_idx2word = create_word_table(train_tgt)
    sys.stderr.write("Lookup table constructed." + "\n")
    NMT = NMTAttention(src_embed_dim=config.src_embed_dim, tgt_embed_dim=config.tgt_embed_dim,
                       src_lstm_op_dim=config.src_lstm_op_dim, tgt_lstm_op_dim=config.tgt_lstm_op_dim,
                       src_word2idx=src_word2idx, tgt_idx2word=tgt_idx2word, beta=config.beta)
    variables = NMT.build_model1()

    # Objective, and construct a function
    updates = Optimizer(clip=5.0).adam(cost=variables['cost'], params=variables['params'])
    f_train = theano.function(inputs=[variables['src_ip'], variables['tgt_ip'], variables['tgt_op']], outputs=variables['cost'], updates=updates)
    f_eval = theano.function(inputs=[variables['src_ip'], variables['tgt_ip']], outputs=variables['proj_layer'])

    all_costs = []
    log = open('train.log', 'w')
    n_epochs = 100

    best_valid_predictions = None
    best_valid_score = -1
    best_test_predictions = None

    for epoch in xrange(n_epochs):
        ts = time.time()
        sys.stderr.write("====== Epoch %d ======" % epoch + "\n")
        # Shuffle order
        indices = range(len(train_src))
        np.random.shuffle(indices)
        train_src_sents = [train_src[i] for i in indices]
        train_tgt_sents = [train_tgt[i] for i in indices]
        costs = []

        # For all the sentences
        for i in xrange(len(train_src_sents)):
            new_cost = f_train(
                np.array([src_word2idx[w] for w in train_src_sents[i]]).astype(np.int32),
                np.array([tgt_word2idx[w] for w in train_tgt_sents[i]][:-1]).astype(np.int32),
                np.array([tgt_word2idx[w] for w in train_tgt_sents[i]][1:]).astype(np.int32),
            )
            all_costs.append((i, new_cost))
            costs.append(new_cost)

            if i % 300 == 0:
                sys.stderr.write("%d, %f" % (i, np.mean(costs)) + "\n")
                costs = []

            if i % 10000 == 0 and i != 0:
                valid_preds = get_predictions(src_word2idx, tgt_word2idx, tgt_idx2word, f_eval, mode="validation")
                bleu = get_validation_bleu(valid_preds)
                sys.stderr.write('Epoch %d BLEU on Validation : %s\n' % (epoch, bleu))
                if float(bleu) >= best_valid_score:
                    best_valid_score = float(get_validation_bleu(valid_preds))
                    best_valid_predictions = deepcopy(valid_preds)
                    best_test_predictions = deepcopy(get_predictions(src_word2idx, tgt_word2idx, tgt_idx2word, f_eval, mode="test"))
                    sys.stderr.write('Found new best validation score %f ' % (best_valid_score) + "\n")
                log.write('Epoch %d Iter %d BLEU on Validation : %s \n' % (epoch, i, bleu))

        # Compute time it takes for one epoch
        te = time.time()
        sys.stderr.write('Elapsed time for one epoch: %f\n' % (te - ts))

        # Store after epoch
        fout = open('output' + str(epoch) + '.txt', 'w')
        for line in best_test_predictions:
            fout.write(' '.join(line) + '\n')
        fout.close()

    log.close()
Code Example #23
File: model_builder.py Project: eriher/Summarizer
def build_optim(args, model, checkpoint):
    """ Build optimizer """

    if checkpoint is not None:
        optim = checkpoint['optim']
        saved_optimizer_state_dict = optim.optimizer.state_dict()
        optim.optimizer.load_state_dict(saved_optimizer_state_dict)
        if args.visible_gpus != '-1':
            for state in optim.optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()

        if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
            raise RuntimeError(
                "Error: loaded Adam optimizer from existing model" +
                " but optimizer state is empty")

    else:
        optim = Optimizer(args.optim,
                          args.lr,
                          args.max_grad_norm,
                          beta1=args.beta1,
                          beta2=args.beta2,
                          decay_method='noam',
                          warmup_steps=args.warmup_steps)

    optim.set_parameters(list(model.named_parameters()))

    return optim
    """ Build optimizer """

    if checkpoint is not None:
        optim = checkpoint['optims'][1]
        saved_optimizer_state_dict = optim.optimizer.state_dict()
        optim.optimizer.load_state_dict(saved_optimizer_state_dict)
        if args.visible_gpus != '-1':
            for state in optim.optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()

        if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
            raise RuntimeError(
                "Error: loaded Adam optimizer from existing model" +
                " but optimizer state is empty")

    else:
        optim = Optimizer(args.optim,
                          args.lr_dec,
                          args.max_grad_norm,
                          beta1=args.beta1,
                          beta2=args.beta2,
                          decay_method='noam',
                          warmup_steps=args.warmup_steps_dec)

    params = [(n, p) for n, p in list(model.named_parameters())
              if not n.startswith('bert.model')]
    optim.set_parameters(params)

    return optim
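Both branches request the 'noam' decay method. A minimal sketch of the rate that schedule computes (the standard Transformer warmup/decay rule; how this project's Optimizer applies it is an assumption):

def noam_lr(step, model_size, warmup_steps, base_lr=1.0):
    # Linear warmup followed by inverse-square-root decay.
    step = max(step, 1)
    return base_lr * model_size ** -0.5 * min(step ** -0.5,
                                              step * warmup_steps ** -1.5)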
Code Example #24
                                        l_seeds=l_seeds,
                                        l_targets=l_targets,
                                        l_true_targets=l_true_targets,
                                        l_seeds_weight=l_seeds_weight,
                                        alpha=priorization_params["alpha"],
                                        laplacian_exponent=priorization_exponent,
                                        tol=1e-08,
                                        max_iter=priorization_params["max_iter"],
                                        max_fpr=priorization_params["max_fpr"],
                                        auroc_normalized=priorization_params["auroc_normalize"])

    test_name = evaluator.metric_name + "_PropagatorExponent" + str(priorization_exponent) + "_" + integrator_name
    optimizer = Optimizer(optimization_name=test_name,
                          path2files=Wpath,
                          space=Optimizer.get_integrator_space(integrator=integrator),
                          objective_function=lambda sp: Optimizer.gamma_objective_function(sp,
                                                                                           evaluator=evaluator,
                                                                                           integrator=integrator),
                          max_evals=optimizer_params["max_evals"],
                          maximize=True)
    tpe_results, best = optimizer.optimize()
    tpe_results = Optimizer.normalize_network_gamma_coeficients(tpe_results, dict_of_networks.keys())
    tpe_results = tpe_results.sort_values("PPI")

    tpe_results.to_csv(Wpath+"/{}_PPI_BP_results.csv".format(test_name))
    plt.figure(figsize=(12, 7))
    plt.plot(tpe_results["PPI"], tpe_results[optimizer.__name__], '.-')
    plt.xlabel(r"$\gamma_{PPI}$")
    plt.ylabel(evaluator.metric_name)
    plt.hlines((auc_ppi, auc_bp), colors=(ppi_params["color"], bp_params["color"]), xmin=0, xmax=1, linestyle="dashdot")
    plt.savefig(Wpath+"/{}_PPI_BP_results.svg".format(test_name))
    plt.close()
Code Example #25
space = {
    "threshold": hp.uniform("threshold", 0, 1),
    "to_directed": hp.choice("to_directed", [True, False])
}

max_evals = optimizer_params["max_evals"]
for mode in ("laplacian", "one_mode_proyection"):
    test_name = "PRINCE_" + mode
    filename = Wpath + "/{}_PPI_results".format(test_name)
    space_fixed = dict()
    space_fixed["mode"] = mode

    optimizer = Optimizer(
        optimization_name=test_name,
        path2files=Wpath,
        space=space,
        objective_function=lambda space: eval_func(space, space_fixed),
        max_evals=max_evals,
        maximize=True)
    tpe_results, best = optimizer.optimize()
    tpe_results = tpe_results.sort_values("threshold")
    tpe_results.to_csv(filename + ".csv")

    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 7))
    for label, col, isdirected in zip(("directed", "original scores"),
                                      ("blue", "green"), (True, False)):
        try:
            visualizators.plot_optimization(
                x_variable="threshold",
                y_variable=evaluator.metric_name,
                optimizer=optimizer,