Exemplo n.º 1
0
def rivalProposal():
    """Demo: two rival Paxos proposals.

    Alpha's 'ABC' round gathers promises from only alpha and beta, then
    beta starts a rival 'XYZ' round that reaches a majority (beta and
    gamma), after which the accept/learn phase is driven for every site.
    """
    print("rivalProposal")
    sites = ['alpha', 'beta', 'gamma']
    um = UniversalMessenger()
    messengers = [MockMessenger(site, sites, um) for site in sites]
    m1, m2, m3 = messengers
    p1 = Proposer('alpha', sites, m1)
    p2 = Proposer('beta', sites, m2)
    p3 = Proposer('gamma', sites, m3)
    acceptors = [Acceptor(sites, messenger) for messenger in messengers]
    a1, a2, a3 = acceptors
    learners = [Learner(sites, messenger) for messenger in messengers]
    l1, l2, l3 = learners

    # Round 1: alpha proposes 'ABC'; only alpha and beta promise.
    p1.prepare('ABC', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    # gamma's promise and the 'ABC' accept phase are intentionally omitted.

    # Round 2: beta proposes the rival value 'XYZ'.
    # A majority of these need to be driven for XYZ to be accepted; here
    # beta and gamma reply (alpha's promise is omitted).
    p2.prepare('XYZ', 0)
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))

    # Accept phase: each acceptor accepts; after each acceptance every
    # learner is fed the latest 'accepted' message addressed to its site.
    for acceptor, site in zip(acceptors, sites):
        acceptor.accept(um.lastMessageToSite(site, 'accept'))
        for learner, learner_site in zip(learners, sites):
            learner.receiveAccepted(um.lastMessageToSite(learner_site, 'accepted'))

    um.printMessages()
Exemplo n.º 2
0
    def __init__(self, args, config):
        """Meta-learner with learnable per-parameter inner learning rates.

        :param args: hyperparameter namespace; must provide meta_lr, n_way,
            k_spt, k_qry, task_num, update_step, update_step_test,
            lr_type ("vector" or "scalar"), update_lr, imgc and imgsz.
        :param config: layer configuration forwarded to Learner.
        :raises ValueError: if args.lr_type is neither "vector" nor "scalar".
        """
        super(Meta, self).__init__()

        self.meta_lr = args.meta_lr
        self.n_way = args.n_way
        self.k_spt = args.k_spt
        self.k_qry = args.k_qry
        self.task_num = args.task_num
        self.update_step = args.update_step
        self.update_step_test = args.update_step_test

        self.net = Learner(config, args.imgc, args.imgsz)

        # Create learnable per parameter learning rate
        self.type = args.lr_type
        if self.type == "vector":
            # One learnable LR tensor per network parameter (same shape).
            self.update_lr = nn.ParameterList()
            for p in self.net.parameters():
                p_lr = args.update_lr * torch.ones_like(p)
                self.update_lr.append(nn.Parameter(p_lr))
            params = list(self.net.parameters()) + list(self.update_lr)
        elif self.type == "scalar":
            # A single learnable scalar LR shared by all parameters.
            self.update_lr = nn.Parameter(torch.tensor(args.update_lr))
            params = list(self.net.parameters())
            params += [self.update_lr]
        else:
            # Fix: an unrecognised lr_type previously left `params` unbound
            # and crashed below with a confusing NameError.
            raise ValueError("unsupported lr_type: {!r}".format(self.type))

        # Define outer optimizer (also optimize lr)
        self.meta_optim = optim.Adam(params, lr=self.meta_lr)
Exemplo n.º 3
0
def evaluate(f, optimizer, best_sum_loss, best_final_loss, best_flag, lr):
    """Evaluate the LSTM optimizer on f for 100 steps and persist it when it
    beats the best scores stored on disk.

    Returns the (possibly updated) best_sum_loss, best_final_loss, best_flag.
    """
    print('\n --> evalute the model')
    STEPS = 100
    lstm_learner = Learner(f, optimizer, STEPS, eval_flag=True,
                           reset_theta=True, retain_graph_flag=True)
    lstm_losses, sum_loss = lstm_learner()

    # Prefer the best scores persisted on disk over the ones passed in.
    try:
        best = torch.load('best_loss.txt')
    except IOError:
        print('can not find best_loss.txt')
    else:
        best_sum_loss, best_final_loss = best[0], best[1]
        print("load_best_final_loss and sum_loss")

    # A new best requires improving BOTH the final and the summed loss.
    final_loss = lstm_losses[-1]
    if final_loss < best_final_loss and sum_loss < best_sum_loss:
        best_final_loss = final_loss
        best_sum_loss = sum_loss
        print(
            '\n\n===> best of final LOSS[{}]: =  {}, best_sum_loss ={}'.format(
                STEPS, best_final_loss, best_sum_loss))
        torch.save(optimizer.state_dict(), 'best_LSTM_optimizer.pth')
        torch.save([best_sum_loss, best_final_loss, lr], 'best_loss.txt')
        best_flag = True

    return best_sum_loss, best_final_loss, best_flag
    def setUp(self):
        """Learner Testing Configuration.

        Sets up the necessary information to begin testing.

        """
        # Fixture parameters: each model gets a random 100x20 data matrix.
        self.data_shape = 100, 20
        self.label = '`learner.Learner`'
        self.learner = Learner()
        self.n_tests = 50
        self.name = __name__
        # Maps model name -> tuple of its parameter matrix shapes (filled below).
        self.shapes = {}

        p_to_shape = lambda p: p.shape
        """callable: Maps parameters to their matrix dimensions."""

        # NOTE(review): iterating the learner is assumed to yield
        # (name, ModelWrapper) pairs -- confirm against Learner.__iter__.
        for name, ModelWrapper in self.learner:
            # Feeding a data matrix is what initialises the wrapped model's
            # parameters (exercised by the assertions below).
            ModelWrapper.model = dict(X=_random_matrix(self.data_shape))

            # `_compose(tuple, map)` presumably evaluates as
            # tuple(map(p_to_shape, params)) -- record every parameter's
            # dimensions for later shape checks.
            self.shapes[name] = _compose(tuple,
                                         map)(p_to_shape,
                                              ModelWrapper._model.params)

            # Model string should indicate that all parameters are set at this
            # point.
            self.assertIsNotNone(ModelWrapper._model.params)

            del ModelWrapper._model.params

            # Model string should indicate unset parameters at this point.
            self.assertIsNone(ModelWrapper._model.params)
Exemplo n.º 5
0
def zero_shot_NOmeta(args):
    """Zero-shot cross-lingual evaluation without meta learning.

    Refuses to run when the result file already exists, evaluates the model
    on every test language's test split, and writes the per-language F1
    scores to a JSON file under args.model_dir.
    """
    res_filename = '{}/res-0shot-NOmeta-{}.json'.format(args.model_dir, '_'.join(args.test_langs))
    if os.path.exists(res_filename):
        assert False, 'Already evaluated.'

    logger.info("********** Scheme: 0-shot NO meta learning **********")

    # Build the model; meta-training is disabled via zeroed/negative settings.
    learner = Learner(args.bert_model, LABEL_LIST, args.freeze_layer, logger, lr_meta=0, lr_inner=0,
                      warmup_prop_meta=-1, warmup_prop_inner=-1, max_meta_steps=-1,
                      model_dir=args.model_dir, gpu_no=args.gpu_device, py_alias=args.py_alias).to(device)

    F1s = {}
    for language in args.test_langs:
        F1s[language] = []
        corpus_test = Corpus('bert-base-multilingual-cased', args.max_seq_len, logger, language=language, mode='test',
                            load_data=False, support_size=-1, base_features=None, mask_rate=-1.0,
                            compute_repr=False, shuffle=False)

        logger.info("********** Scheme: evaluate [{}] - [test] **********".format(language))
        F1_test = learner.evaluate_NOmeta(corpus_test, args.result_dir, logger, lang=language, mode='test')

        F1s[language].append(F1_test)
        logger.info("===> Test F1: {}".format(F1_test))

    # Summary line per language.
    for language in args.test_langs:
        logger.info('{} Test F1: {}'.format(language, ', '.join(str(score) for score in F1s[language])))

    with Path(res_filename).open('w', encoding='utf-8') as fw:
        json.dump(F1s, fw, indent=4, sort_keys=True)
Exemplo n.º 6
0
    def __init__(self, model, device):
        """Build an eval-mode copy of `model`'s convolutional trunk on `device`.

        :param model: source network whose first 16 parameter tensors are
            (intended to be) copied into the fresh Learner trunk.
        :param device: torch device the copied model is moved to.
        """
        super(RobustVis, self).__init__()
        #self.normalize = helpers.InputNormalize(dataset.mean, dataset.std)
        # Trunk spec consumed by Learner: 4x (conv2d -> relu -> bn ->
        # max_pool2d), then flatten.  Each entry is (op name, op args).
        configtest = [
            ('conv2d', [32, 3, 3, 3, 1, 0]),
            ('relu', [True]),
            ('bn', [32]),
            ('max_pool2d', [2, 2, 0]),
            ('conv2d', [32, 32, 3, 3, 1, 0]),
            ('relu', [True]),
            ('bn', [32]),
            ('max_pool2d', [2, 2, 0]),
            ('conv2d', [32, 32, 3, 3, 1, 0]),
            ('relu', [True]),
            ('bn', [32]),
            ('max_pool2d', [2, 2, 0]),
            ('conv2d', [32, 32, 3, 3, 1, 0]),
            ('relu', [True]),
            ('bn', [32]),
            ('max_pool2d', [2, 1, 0]),
            ('flatten', [])
        ]
        
        copymod = Learner(configtest, 3, 84)#.to('cuda:3')
        # NOTE(review): torch's Module.parameters() normally returns a fresh
        # generator (neither indexable nor shared), so this copy loop only
        # works if Learner.parameters() returns a live indexable container
        # (e.g. a ParameterList) -- confirm; otherwise the weights are never
        # actually copied.
        for i in range(0,16):
            copymod.parameters()[i] = model.parameters()[i]
        
        
        self.model = copymod.to(device)
        self.model.eval()  # inference mode: fixes bn statistics
        self.device = device
Exemplo n.º 7
0
    def __init__(self, args, config):
        """Meta-learner that trains an inner Learner network with an
        EntropySGD outer optimizer.

        :param args: hyperparameter namespace (update_lr, meta_lr, n_way,
            k_spt, k_qry, task_num, update_step, update_step_test, imgc,
            imgsz).
        :param config: layer configuration forwarded to Learner.
        """
        super(Meta, self).__init__()

        # Mirror the relevant hyperparameters onto the instance.
        for name in ('update_lr', 'meta_lr', 'n_way', 'k_spt', 'k_qry',
                     'task_num', 'update_step', 'update_step_test'):
            setattr(self, name, getattr(args, name))
        self.meta_states = {}
        self.iteration = 1

        self.net = Learner(config, args.imgc, args.imgsz)
        # Outer optimizer; a plain Adam alternative was considered:
        # self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
        entropy_sgd_config = dict(lr=1.2,
                                  momentum=0,
                                  nesterov=True,
                                  weight_decay=0,
                                  L=10,
                                  eps=1e-4,
                                  g0=1e-2,
                                  g1=1e-1)
        self.optimizer = optim.EntropySGD(self.net.parameters(),
                                          config=entropy_sgd_config)
Exemplo n.º 8
0
def main():
    """Hyperparameter sweep: train a dropout ResUnet on prostate data over a
    grid of learning rates with SGD, ReduceLROnPlateau and focal loss."""
    from learner import Learner
    from res_unet_dropout import ResUnet

    dprob = 0.2  # dropout probability (an earlier run used lr=3e-5)
    epochs = 8

    datasets = {
        'train': Prostate_data(img_size=256, num_classes=3),
        'valid': Prostate_data(dataset_type='valid', img_size=256, num_classes=3),
    }

    for lr in (1e-4, 5e-4, 1e-3, 5e-3, 1e-2):
        for gamma in (0,):
            model = ResUnet(num_classes=5, dprob=dprob)
            criterion = Focalloss(gamma=gamma)
            optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10)
            dtime = '0057_1806'
            tb_logs = {'path': 'logdirs/onevall_trials_aug/respre_SGD_plateau',
                       'comment': f'lr={lr}_gamma={gamma}_dprob={dprob}_{dtime}'}
            trainer = Learner(datasets, model, criterion, optimizer, scheduler,
                              bs=8, num_workers=4)
            try:
                trainer.fit(tb_logs=tb_logs, epochs=epochs)
            except KeyboardInterrupt:
                # Ctrl-C skips the current configuration without aborting
                # the whole sweep.
                pass
Exemplo n.º 9
0
    def setup_master(self):
        """Assemble both teams' agents and policies and wrap them in a Learner.

        Non-attacker agents ("guards") share `policy1`; attackers share
        `policy2`.  Each agent becomes a Neo wrapping its team's shared
        policy; the Learner master receives both teams and both policies.
        """
        policy1 = None
        policy2 = None
        team1 = [
        ]  ## List of Neo objects, one Neo object per agent, teams_list = [team1, teams2]
        team2 = []

        # First pass: count agents per side so the policies can be sized
        # with the right number of teammates/opponents.
        num_adversary = 0
        num_friendly = 0
        for i, agent in enumerate(self.world.policy_agents):
            if agent.attacker:
                num_adversary += 1
            else:
                num_friendly += 1

        # NOTE(review): `i` here is left over from the counting loop above,
        # so this is the *last* agent's action space, reused for every Neo
        # below -- almost certainly wrong if action spaces differ per agent.
        action_space = self.action_spaces[
            i]  ##* why on earth would you put i???
        pol_obs_dim = self.observation_spaces[0].shape[
            0]  ##* ensure 27 is correct

        # index at which agent's position is present in its observation
        pos_index = 2  ##* don't know why it's a const and +2

        # Second pass: build one Neo per agent, creating each team's shared
        # policy lazily on the first agent of that side.
        for i, agent in enumerate(self.world.policy_agents):
            obs_dim = self.observation_spaces[i].shape[0]

            if not agent.attacker:  # first we have the guards and then the attackers
                if policy1 is None:
                    policy1 = MPNN(input_size=pol_obs_dim,
                                   num_agents=num_friendly,
                                   num_opp_agents=num_adversary,
                                   num_entities=0,
                                   action_space=self.action_spaces[i],
                                   pos_index=pos_index,
                                   mask_dist=1.0,
                                   entity_mp=False,
                                   policy_layers=1).to(self.device)
                team1.append(
                    Neo(self.namespace_args, policy1, (obs_dim, ),
                        action_space)
                )  ## Neo adds additional features to policy such as loading model, update_rollout. Neo.actor_critic is the policy and is the same object instance within a team
            else:
                if policy2 is None:
                    policy2 = MPNN(input_size=pol_obs_dim,
                                   num_agents=num_adversary,
                                   num_opp_agents=num_friendly,
                                   num_entities=0,
                                   action_space=self.action_spaces[i],
                                   pos_index=pos_index,
                                   mask_dist=1.0,
                                   entity_mp=False).to(self.device)
                team2.append(
                    Neo(self.namespace_args, policy2, (obs_dim, ),
                        action_space))

        master = Learner(self.namespace_args, [team1, team2],
                         [policy1, policy2],
                         world=self.world)

        return master
Exemplo n.º 10
0
    def __init__(self, args, config):
        """Meta-learner with a learnable (update_step x num-parameter-tensors)
        matrix of inner learning rates, an Adam outer optimizer and
        exponential outer-LR decay.

        :param args: hyperparameter namespace.
        :param config: layer configuration forwarded to Learner.
        """
        super(Meta, self).__init__()

        # Mirror scalar hyperparameters onto the instance
        # (args.update_lr only seeds the learnable rates below).
        for name in ('meta_lr', 'n_way', 'k_spt', 'k_qry', 'task_num',
                     'update_step', 'update_step_test', 'reg_lambda'):
            setattr(self, name, getattr(args, name))
        self.regularization = nn.MSELoss()

        self.net = Learner(config, args.imgc, args.imgsz)
        # One learnable inner-loop LR per (update step, parameter tensor).
        init_lrs = args.update_lr * torch.ones(self.update_step,
                                               len(self.net.vars))
        self.update_lrs = nn.Parameter(init_lrs, requires_grad=True)
        self.meta_optim = optim.Adam(self.parameters(), lr=self.meta_lr)
        # (A Hessian-free CG optimizer was considered here previously.)
        # Decay the outer learning rate by 0.9 per scheduler step.
        self.lr_scheduler = optim.lr_scheduler.ExponentialLR(
            self.meta_optim, 0.9)
        self.MAX_CG = args.MAX_CG
Exemplo n.º 11
0
def main():
  """Train a text classifier from hand-labelled articles, then score every
  article in the database.

  NOTE: Python 2 source (print statement in the prediction loop).
  """
  dbinfo = recover()
  conn = MySQLdb.connect(**dbinfo)

  cur = conn.cursor()

  #Learn: use training-set articles that received at least one label vote
  sql = "SELECT id,article_text,trainpos,trainneg,trainneutral FROM articles WHERE trainset=1 AND (trainpos>0 OR trainneg>0 OR trainneutral>0)"
  cur.execute(sql)
  a = Learner()
  for aid,article_text,trainpos,trainneg,trainneutral in cur.fetchall():
    aid = int(aid)
    # Majority vote among (pos=1, neutral=0, neg=-1) picks the label.
    items = [ (1, int(trainpos)),(0, int(trainneutral)),(-1, int(trainneg)) ]
    classification = max(items, key=lambda x : x[1])[0]
    a.add_string(article_text, classification)
  a.train()

  #Predict: classify every article and persist its score
  sql = "SELECT id,article_text FROM articles"
  cur.execute(sql)
  b = Classifier(a)
  for aid,article_text in cur.fetchall():
    aid = int(aid)
    classification = b.classify(article_text)
    # Parameterized query -- values are bound, not string-formatted.
    sql = "UPDATE articles SET score=%s WHERE id=%s"
    args = [classification,aid]
    cur.execute(sql,args)
    print aid,classification

  conn.commit()
Exemplo n.º 12
0
 def __init__(self):
     """Initialise the learner-driven game session.

     NOTE(review): `learn`, `load` and `learnerFile` are read from module
     globals -- confirm they are defined at import time.
     """
     self.learn = learn
     self.load = load
     self.learner = Learner()
     # Restore previously saved learner data only when loading is enabled
     # and the save file actually exists on disk.
     if self.load and os.path.isfile(learnerFile):
         self.learner.loadData(learnerFile)
     self.learner.newGame()
Exemplo n.º 13
0
def main(args_list: list = []):
    """Select an environment-specific argument set, then train a Learner.

    --env-type picks which args module supplies the remaining
    hyperparameters (gridworld, point_robot_sparse, cheetah_vel or
    ant_semicircle_sparse); default is point_robot_sparse.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--env-type', default='point_robot_sparse')
    args, rest_args = parser.parse_known_args(args_list)
    env = args.env_type

    # Lazy dispatch table: each module name is only resolved when its
    # environment is actually selected (mirrors the original if/elif chain).
    loaders = {
        'gridworld': lambda: args_gridworld.get_args(rest_args),
        'point_robot_sparse': lambda: args_point_robot_sparse.get_args(rest_args),
        'cheetah_vel': lambda: args_cheetah_vel.get_args(rest_args),
        'ant_semicircle_sparse': lambda: args_ant_semicircle_sparse.get_args(rest_args),
    }
    if env in loaders:
        args = loaders[env]()

    set_gpu_mode(torch.cuda.is_available())

    # Some configurations persist a replay buffer to disk.
    if hasattr(args, 'save_buffer') and args.save_buffer:
        os.makedirs(args.main_save_dir, exist_ok=True)

    learner = Learner(args)
    learner.train()
def main():
    """Train the baseline sentiment model with a cyclic LR schedule.

    Trains 6 epochs with the embedding layer frozen, unfreezes it, trains 5
    more, logs final train/test statistics, and optionally saves the model
    state dict under models/<name> when --save-to is given.
    """
    # Setup logger
    logging.basicConfig(filename='log.txt',
                        level=logging.INFO,
                        format='%(asctime)s %(levelname)s : %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    logging.info('Running file : {}'.format(__file__))

    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--save-to',
                        type=str,
                        default=None,  # fix: line was corrupted ("C > default=None,") -- a syntax error
                        help='name of a file to save')
    args = parser.parse_args()

    # get dataset
    dataset, test_dataset, emb_weights = get_dataset(min_freq=5, test_set=True)
    dataset.fields['review'].include_lengths = True
    # the above line adds a tensor with lengths of reviews in a batch,
    # so that we can pass batches to embeddingbag
    ds_train, ds_val = dataset.split(split_ratio=[0.9, 0.1])

    if args.seed is not None:
        # fix: was random.seed(args.randomseed) -- argparse stores --seed
        # as args.seed, so the old attribute raised AttributeError.
        random.seed(args.seed)

    bs = 64
    logging.info('Initialising the model with the embedding layer frozen.')
    model = Baseline_model(emb_weights.clone()).to(device)
    loss_fn = nn.CrossEntropyLoss().to(device)
    optimiser = AdamW(model.parameters(), weight_decay=1e-2)
    steps_per_cycle = (len(ds_train) // bs + 1)
    scheduler = CyclicLRDecay(optimiser,
                              1e-4,
                              1e-2,
                              steps_per_cycle,
                              0.25,
                              gamma_factor=0.80)
    learner = Learner(model, loss_fn, optimiser, scheduler, ds_train, ds_val,
                      device)

    logging.info('Training')
    learner.train(epochs=6, bs=bs, grad_clip=(0.2, 0.4))

    logging.info('Unfreezing the embedding layer')
    model.embedding.weight.requires_grad_(True)
    learner.train(epochs=5, bs=bs, grad_clip=(0.2, 0.4))

    logging.info('--- Final statistics ---')
    logging.info('Training loss : {:.4f}, accuracy {:.4f}'.format(
        *validate(ds_train, loss_fn, model, bs)))
    logging.info('Test loss : {:.4f}, accuracy {:.4f}'.format(
        *validate(test_dataset, loss_fn, model, bs)))

    if args.save_to is not None:
        if not os.path.exists('models'):
            os.makedirs('models')
        # fix: args['--save-to'] -- argparse.Namespace is not subscriptable;
        # the attribute is args.save_to.
        logging.info('Model saved to: models/{}'.format(args.save_to))
        torch.save(model.state_dict(), 'models/{}'.format(args.save_to))
Exemplo n.º 15
0
def main():
    """Locate the running game on screen, then drive it with a Learner via
    the UI until interrupted.

    NOTE: Python 2 source (print statement).
    """
    # Initialize the game manipulator and locate the game window on screen.
    gm = GameManipulator()
    gm.findGamePosition()

    # Check for found game
    if (not gm.offset):
        print 'FAILED TO FIND GAME!'
        return
    gm.focusGame()
    # Initialize UI

    # Shared event that signals every thread to shut down.
    global_stop_event = threading.Event()

    # Init Learner -- assumes (game, 12, 4, 0.2) are (manipulator, state
    # size?, actions?, exploration rate?) -- TODO confirm against Learner.
    learner = Learner(gm, 12, 4, 0.2)
    try:
        # Initialize UI and start the game
        UI(gm, learner, global_stop_event).run()
    except KeyboardInterrupt:
        # Ctrl-C: stop all threads and mark the learner as interrupted.
        global_stop_event.set()
        learner.interuptted = True
        # clear log file
        with open('/tmp/ui.log', 'w'):
            pass
        raise SystemExit
    def __init__(self, args, config):
        """Meta-learner whose inner-loop learning rates are learnable: for
        every parameter tensor of the Learner there is an
        (update_step x first-dimension) matrix of rates, optimised jointly
        with the network by Adam with exponential outer-LR decay.

        :param args: hyperparameter namespace.
        :param config: layer configuration forwarded to Learner.
        """
        super(Meta, self).__init__()

        # Mirror scalar hyperparameters onto the instance
        # (args.update_lr only seeds the learnable rates below).
        for name in ('meta_lr', 'n_way', 'k_spt', 'k_qry', 'task_num',
                     'update_step', 'update_step_test'):
            setattr(self, name, getattr(args, name))

        self.net = Learner(config, args.imgc, args.imgsz)
        # One learnable LR matrix per parameter tensor, initialised to
        # args.update_lr everywhere.
        self.update_lrs = nn.ParameterList()
        for var in self.net.vars:
            init = args.update_lr * torch.ones(self.update_step, var.size(0))
            self.update_lrs.append(nn.Parameter(init, requires_grad=True))
        # self.parameters() covers both the network and the LR matrices.
        self.meta_optim = optim.Adam(self.parameters(), lr=self.meta_lr)
        self.lr_scheduler = optim.lr_scheduler.ExponentialLR(
            self.meta_optim, 0.9)
Exemplo n.º 17
0
def solve_task(task, max_steps=10, workspace=Path('/tmp'), verbose=1):
    """Fine-tune a fresh Learner on a single task and return it with losses.

    Trains for `fintune_epoch` epochs on the task's input/output pairs
    (cross-entropy summed over all pairs per epoch), checkpointing the
    best-loss weights to `workspace / 'best_model.pth'` and reloading them
    before returning.

    :param task: iterable of {'input': grid, 'output': grid} samples.
    :param max_steps: unused; kept for interface compatibility.
    :param workspace: directory for the best-model checkpoint.
    :param verbose: unused; kept for interface compatibility.
    :return: (model, losses) -- fine-tuned model and per-epoch loss array.
    """
    model = Learner(model_config)
    # model = load_model(model, verbose)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    losses = np.zeros(fintune_epoch)
    optimizer = torch.optim.Adam(model.parameters(), lr=fintune_lr, weight_decay=weight_decay)
    # Fix: start at +inf (was the magic number 1000) so the first epoch
    # always writes a checkpoint; previously, if every epoch's loss
    # exceeded 1000, no file was written and the final torch.load crashed.
    best_loss = float('inf')

    # task = data_augmentation(task)
    data_x = [torch.Tensor(inp2img(sample['input'])).unsqueeze(0).float().to(device) for sample in task]
    data_y = [torch.Tensor(np.array(sample['output'])).long().unsqueeze(0).to(device) for sample in task]

    for cur_epoch in range(fintune_epoch):
        loss = 0.0

        for x, y in zip(data_x, data_y):
            # predict output from input; accumulate loss over all pairs
            y_pred = model(x)
            loss += criterion(y_pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses[cur_epoch] = loss.item()
        if best_loss > loss.item():
            best_loss = loss.item()
            torch.save({'model': model.state_dict()}, workspace / 'best_model.pth')

    # Restore the best checkpoint seen during fine-tuning.
    model.load_state_dict(torch.load(workspace / 'best_model.pth')['model'])

    return model, losses
Exemplo n.º 18
0
def secondProposal():
    """Demo: a complete 'ABC' Paxos round reaches consensus, then proposer
    beta runs a full second 'XYZ' prepare/promise round (whose accept phase
    is not driven)."""
    print("secondProposal")
    sites = ['alpha', 'beta', 'gamma']
    um = UniversalMessenger()
    messengers = [MockMessenger(site, sites, um) for site in sites]
    m1, m2, m3 = messengers
    p1 = Proposer('alpha', sites, m1)
    p2 = Proposer('beta', sites, m2)
    p3 = Proposer('gamma', sites, m3)
    acceptors = [Acceptor(sites, messenger) for messenger in messengers]
    a1, a2, a3 = acceptors
    learners = [Learner(sites, messenger) for messenger in messengers]
    l1, l2, l3 = learners

    # Round 1: alpha proposes 'ABC'; all three acceptors promise.
    p1.prepare('ABC', 0)
    for acceptor, site in zip(acceptors, sites):
        acceptor.promise(um.lastMessageToSite(site, 'prepare'))
        p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))

    # Accept phase for 'ABC': each acceptor accepts; after each acceptance
    # every learner hears the latest 'accepted' message for its site.
    for acceptor, site in zip(acceptors, sites):
        acceptor.accept(um.lastMessageToSite(site, 'accept'))
        for learner, learner_site in zip(learners, sites):
            learner.receiveAccepted(um.lastMessageToSite(learner_site, 'accepted'))

    # Round 2: beta proposes 'XYZ'; all three acceptors promise again.
    p2.prepare('XYZ', 0)
    for acceptor, site in zip(acceptors, sites):
        acceptor.promise(um.lastMessageToSite(site, 'prepare'))
        p2.receivePromise(um.lastMessageToSite('beta', 'promise'))

    um.printMessages()
def main():
    """Wire up the logging utilities, run the baseline evaluation, then
    train and evaluate the learner."""
    log_printer = Printer('log.txt')
    utils = Utils(log_printer)
    utils.setup_and_verify()
    utils.evaluate_baseline()
    Learner(utils.learner_utils).train_and_evaluate()
    utils.printer.print('finished!')
Exemplo n.º 20
0
def main(argv):
    """Run the local simulator with either a learning or an executing agent.

    argv[0]: "-L" selects the Learner agent; anything else selects MyExecutor.
    argv[1]: path to the domain file (presumably a planning domain -- confirm).
    argv[2]: path to the problem file.

    NOTE: Python 2 source (print statement).
    """
    domain_path = argv[1]
    problem_path = argv[2]
    if argv[0] == "-L":
        agent = Learner()
    else:
        agent = MyExecutor()
    print LocalSimulator().run(domain_path, problem_path, agent)
Exemplo n.º 21
0
def job(args, train_csv, test_csv, embeddings, cache):
    """ Reads data, makes preprocessing, trains model and records results.
        Gets args as argument and passes values of it's fields to functions.

        Per fold: fits the model, restores the best checkpoint, tunes the F1
        threshold on validation predictions, evaluates on the test set (when
        labels are available) and writes a submission file.  Returns the
        record directory path of the last fold.
    """

    data = Data(train_csv, test_csv, cache)

    # read and preprocess data
    to_cache = not args.no_cache
    data.read_embedding(embeddings, args.unk_std, args.max_vectors, to_cache)
    data.preprocess(args.tokenizer, args.var_length)
    data.embedding_lookup()

    # split train dataset
    data_iter = data.split(args.kfold, args.split_ratio, args.stratified, args.test, args.seed)

    # iterate through folds
    loss_function = nn.BCEWithLogitsLoss()
    for fold, d in enumerate(data_iter):
        print(f'\n__________ fold {fold} __________')
        # get dataloaders; a 2-tuple means the shared test split is reused
        if len(d) == 2:
            train, val = d
            test = data.test
        else:
            train, val, test = d
        dataloaders = iterate(train, val, test, args.batch_size) # train, val and test dataloader

        # choose model, optimizer, lr scheduler
        model = choose_model(args.model, data.text, args.n_layers, args.hidden_dim, args.dropout)
        # only optimize parameters that require gradients (frozen ones excluded)
        optimizer = choose_optimizer(filter(lambda p: p.requires_grad, model.parameters()), args)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lrstep, gamma=0.1)
        learn = Learner(model, dataloaders, loss_function, optimizer, scheduler, args)
        learn.fit(args.epoch, args.n_eval, args.f1_tresh, args.early_stop, args.warmup_epoch, args.clip)

        # load best model
        learn.model, info = learn.recorder.load()
        # save val predictions
        y_pred, y_true, ids = learn.predict_probs()
        val_ids = [data.qid.vocab.itos[i] for i in ids]
        pred_to_csv(val_ids, y_pred, y_true)
        # choose best threshold for val predictions ([start, stop, step] grid)
        best_th, max_f1 = choose_thresh(y_pred, y_true, [0.1, 0.5, 0.01], message=True)
        learn.recorder.append_info({'best_th': best_th, 'max_f1': max_f1})


        # predict test labels
        test_label, test_prob,_, test_ids, tresh = learn.predict_labels(is_test=True, thresh=args.f1_tresh)
        if args.test:
            test_loss, test_f1, _, _, _ = learn.evaluate(learn.test_dl, args.f1_tresh)
            learn.recorder.append_info({'test_loss': test_loss, 'test_f1': test_f1}, message='Test set results: ')

        # save test predictions to submission.csv
        test_ids = [data.qid.vocab.itos[i] for i in test_ids]
        submit(test_ids, test_label, test_prob)
        record_path = learn.recorder.record(fold)  # directory path with all records
        print('\n')
    return record_path
Exemplo n.º 22
0
    def _initActors(self):
        """Create this server's Paxos actors and restore persisted state."""
        servers, server_id = self.servers, self.id
        self.proposer = Proposer(servers, server_id)
        self.acceptor = Acceptor(servers, server_id)
        self.learner = Learner(servers, server_id)

        # Restore a previously saved snapshot, if one exists on disk.
        if os.path.isfile(self.stateFileName):
            print('Loading from:', self.stateFileName)
            self._loadState()
Exemplo n.º 23
0
def train(conf):
    """Run the KernelGAN training loop for conf.max_iters iterations."""
    gan = KernelGAN(conf)
    tracker = Learner()
    batches = DataGenerator(conf, gan)
    for step in tqdm.tqdm(range(conf.max_iters), ncols=60):
        g_in, d_in = batches.__getitem__(step)
        gan.train(g_in, d_in)
        tracker.update(step, gan)
    gan.finish()
Exemplo n.º 24
0
 def evaluate(self, args):
     """Load the model, run evaluation on the test split and write the
     generated text to eval_results.txt under the experiment's output dir.

     :param args: namespace providing data_path, model, experiment_name
         and local_rank.
     :return: evaluation result from Learner.run_eval.
     """
     self.load(args)
     test_set = HBertMseEuopDataset(os.path.join(args.data_path, 'test.h5'), self.model.tokenizer)
     result, generated_text = Learner().run_eval(args, self.model, test_set)
     eval_output_dir = f"./output/{args.model}/{args.experiment_name}/"
     # Only the main process (local_rank -1 or 0) creates the output dir.
     if args.local_rank in [-1, 0]: os.makedirs(eval_output_dir, exist_ok=True)
     with open(os.path.join(eval_output_dir, f"eval_results.txt"), 'w') as f_eval:
         f_eval.write(generated_text+'\n')
     return result    
Exemplo n.º 25
0
    def createLearner(self):
        """Spin up this node's Learner on port+2 and block in its listen loop."""
        worker = Learner(self.port + 2, self.ips, self.ip, self.num)
        if not self.logging_switch:
            worker.logging(False)

        worker.log('starting')
        worker.listen()
        worker.log('exiting')
Exemplo n.º 26
0
def estimate_kernel(img_file):
    """Run KernelGAN on a single image to estimate its kernel."""
    conf = config_kernelGAN(img_file)
    kgan = KernelGAN(conf)
    tracker = Learner()
    batches = DataGenerator(conf, kgan)
    for step in tqdm.tqdm(range(conf.max_iters), ncols=70):
        g_in, d_in, _ = batches.__getitem__(step)
        kgan.train(g_in, d_in)
        tracker.update(step, kgan)
    kgan.finish()
Exemplo n.º 27
0
 def __init__(self):
     """Build the GUI: a tab layout, a settings dock, a results dock with a
     training plot, and a dock for testing on unseen data.
     """
     super().__init__()
     # self.gv = gv
     self.learner = Learner()
     self.pg_layout = self.create_tab()
     self.tool_dock = self.create_settings_dock()
     self.create_open_settings_button()
     # create_result_dock returns (dock, plot); keep the plot for updates.
     result_dock, self.training_plot = self.create_result_dock()
     self.create_test_unseen_data_dock(result_dock)
     self.lr = 0  # presumably the current learning rate shown in the UI -- TODO confirm
Exemplo n.º 28
0
 def get_learner(self):
     """Construct a Learner wired to this agent's actor, critic and replay
     buffer.

     Continuous-action mode is enabled; iteration count, batch size and GPU
     usage come from self.args / self.use_gpu, and metrics are written via
     self.writer.
     """
     return Learner(
         self.actor,
         self.critic,
         self.replay_buffer,
         num_learning_iterations=self.args.num_learning_iterations,
         episode_batch_size=self.args.episode_batch_size,
         use_gpu=self.use_gpu,
         continuous=True,
         writer=self.writer)
Exemplo n.º 29
0
def train(conf):
    """Train the DBPISR network for conf.max_iters iterations, then run its
    finishing step on the input image."""
    sr_net = DBPISR(conf)
    tracker = Learner()
    batches = DataGenerator(conf, sr_net)
    for step in tqdm.tqdm(range(conf.max_iters), ncols=60):
        sr_net.train(batches.__getitem__(step))
        tracker.update(step, sr_net)

    sr_net.finish(batches.input_image)
Exemplo n.º 30
0
def Learning_to_learn_global_training(f, optimizer, global_taining_steps,
                                      optimizer_Train_Steps, UnRoll_STEPS,
                                      Evaluate_period, optimizer_lr):
    """ Training the LSTM optimizer . Learning to learn

    Args:
        `f` : the optimizee function whose parameters the LSTM optimizer learns to optimize
        `optimizer` : DeepLSTMCoordinateWise optimizer model
        `global_taining_steps` : how many steps for optimizer training optimizer
        `optimizer_Train_Steps` : how many step for optimizer opimitzing each function sampled from IID.
        `UnRoll_STEPS` :: how many steps for LSTM optimizer being unrolled to construct a computing graph to BPTT.
        `Evaluate_period` : evaluate (and possibly checkpoint) every this many global steps
        `optimizer_lr` : learning rate for the outer Adam optimizer

    Returns:
        (global_loss_list, best_flag): per-unroll global losses and whether
        a new best score was recorded during evaluation.
    """
    global_loss_list = []
    Total_Num_Unroll = optimizer_Train_Steps // UnRoll_STEPS
    # Outer optimizer: Adam updates the LSTM optimizer's own parameters.
    adam_global_optimizer = torch.optim.Adam(optimizer.parameters(),
                                             lr=optimizer_lr)

    LSTM_Learner = Learner(f,
                           optimizer,
                           UnRoll_STEPS,
                           retain_graph_flag=True,
                           reset_theta=True,
                           reset_function_from_IID_distirbution=False)
    # (translated) If the batch size already represents IID sampling, there
    # is no need to re-draw an IID sample on every unroll.

    best_sum_loss = 999999
    best_final_loss = 999999
    best_flag = False
    for i in range(global_taining_steps):

        print('\n=============> global training steps: {}'.format(i))

        for num in range(Total_Num_Unroll):

            start = timer()
            # One truncated-BPTT segment of UnRoll_STEPS inner steps.
            _, global_loss = LSTM_Learner(num)

            adam_global_optimizer.zero_grad()

            global_loss.backward()

            adam_global_optimizer.step()

            # detach_ drops the graph while keeping the loss value.
            global_loss_list.append(global_loss.detach_())
            time = timer() - start
            print(
                '--> time consuming [{:.4f}s] optimizer train steps :  [{}] | Global_Loss = [{:.1f}]'
                .format(time, (num + 1) * UnRoll_STEPS, global_loss))

        if (i + 1) % Evaluate_period == 0:

            best_sum_loss, best_final_loss, best_flag = evaluate(
                f, optimizer, best_sum_loss, best_final_loss, best_flag,
                optimizer_lr)

    return global_loss_list, best_flag