Code example #1
 def test_default_can_play_2048(self):
     n = Net()
     b = Board()
     data = b._tiles.flatten()
     output = n.run(data)
     self.assertEqual(output.shape, (n.outputs, ))
     b.move(output.argmax(), suppress_invalid=True)
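
A minimal follow-on sketch (not part of the original test): it loops the same three calls so a default Net plays several 2048 moves, assumes only the Net, Board, and move APIs exercised above, and ignores end-of-game handling.

n = Net()
b = Board()
for _ in range(10):
    data = b._tiles.flatten()                        # same board access as the test
    output = n.run(data)                             # one score per candidate move
    b.move(output.argmax(), suppress_invalid=True)   # play the highest-scoring move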
Code example #2
File: ttt.py Project: vhalis/2048
 def __init__(
         self,
         net_number=None,
         epoch_number=None,
         experiment_name=None,
         use_naive_opponent=False,
         play_solo=False,
         **kwargs):
     self.board = BoardTTT(**kwargs)
     self.opponent = None
     if use_naive_opponent:
         self.opponent = NaiveTTTPlayer()
     elif (not play_solo
             and epoch_number is not None
             and experiment_name is not None
             and net_number is not None):
         from nets import Net
         from trainer import GeneticNetTrainer
         try:
             net_data = GeneticNetTrainer.load_single_net(
                 idx=net_number,
                 epoch_num=epoch_number,
                 experiment_name=experiment_name)
             # Hardcoding is hard
             net_hidden_sizes = (3, 3)
             # 2D Tic Tac Toe sizes
             net_inputs = 9
             net_outputs = 9
             self.opponent = Net(
                 hidden_sizes=net_hidden_sizes,
                 weights=net_data,
                 inputs=net_inputs,
                 outputs=net_outputs)
         except IOError:
             pass
Code example #3
File: ttt.py Project: vithar7/2048
 def __init__(self,
              net_number=None,
              epoch_number=None,
              experiment_name=None,
              use_naive_opponent=False,
              play_solo=False,
              **kwargs):
     self.board = BoardTTT(**kwargs)
     self.opponent = None
     if use_naive_opponent:
         self.opponent = NaiveTTTPlayer()
     elif (not play_solo and epoch_number is not None
           and experiment_name is not None and net_number is not None):
         from nets import Net
         from trainer import GeneticNetTrainer
         try:
             net_data = GeneticNetTrainer.load_single_net(
                 idx=net_number,
                 epoch_num=epoch_number,
                 experiment_name=experiment_name)
             # Hardcoding is hard
             net_hidden_sizes = (3, 3)
             # 2D Tic Tac Toe sizes
             net_inputs = 9
             net_outputs = 9
             self.opponent = Net(hidden_sizes=net_hidden_sizes,
                                 weights=net_data,
                                 inputs=net_inputs,
                                 outputs=net_outputs)
         except IOError:
             pass
Code example #4
def get_net(name, device):
    if name == 'MNIST':
        return Net(MNIST_Net, params[name], device)
    elif name == 'FashionMNIST':
        return Net(MNIST_Net, params[name], device)
    elif name == 'SVHN':
        return Net(SVHN_Net, params[name], device)
    elif name == 'CIFAR10':
        return Net(CIFAR10_Net, params[name], device)
    else:
        raise NotImplementedError
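
A hypothetical call of the factory above, assuming `params` is a dict of per-dataset settings and the model classes are importable in the same module.

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = get_net('CIFAR10', device)  # wraps CIFAR10_Net with params['CIFAR10'] on `device`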
Code example #5
File: trainer.py Project: wa1tzy/wa1tzy
    def train(self):
        save_path = "models/net_arcloss2.pth"
        epoch = 0
        train_data = torchvision.datasets.MNIST(root="./MNIST",
                                                download=True,
                                                train=True,
                                                transform=transforms.Compose([
                                                    transforms.ToTensor(),
                                                    transforms.Normalize(
                                                        mean=[
                                                            0.5,
                                                        ],
                                                        std=[
                                                            0.5,
                                                        ])
                                                ]))
        train_loader = data.DataLoader(train_data,
                                       shuffle=True,
                                       batch_size=100)
        net = Net().to(self.device)
        if os.path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
        else:
            print("NO Param")
        optimizer = torch.optim.SGD(net.parameters(),
                                    lr=1e-3,
                                    momentum=0.9,
                                    weight_decay=0.0005)
        # optimizer = torch.optim.Adam(net.parameters())
        scheduler = lr_scheduler.StepLR(optimizer, 20, gamma=0.8)
        while True:
            feat_loader = []
            label_loader = []
            for i, (x, y) in enumerate(train_loader):
                x = x.to(self.device)
                y = y.to(self.device)
                feature, output = net.forward(x)
                loss = self.lossfn_cls(output, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                feat_loader.append(feature)
                label_loader.append(y)

                if i % 600 == 0:
                    print("epoch:", epoch, "i:", i, "arcsoftmax_loss:",
                          loss.item())
            feat = torch.cat(feat_loader, 0)
            labels = torch.cat(label_loader, 0)
            net.visualize(feat.data.cpu().numpy(),
                          labels.data.cpu().numpy(), epoch)
            epoch += 1
            torch.save(net.state_dict(), save_path)
            scheduler.step()
            if epoch == 150:
                break
Code example #6
def moons_test():
    print('-' * 10, 'moons test', '-' * 10)
    data = datasets.make_moons(n_samples=1000)
    X = data[0]
    y = np.expand_dims(data[1], 1)  # binary classification
    print('X: {}, y: {}'.format(X.shape, y.shape))

    net = Net([Linear(2, 8, act='sigmoid'),
               Linear(8, 1, act='sigmoid')],
              loss='mse')
    print(net)

    net.train(X, y, batch_size=64, epochs=100, lr=0.5)
    print('-' * 35)
Code example #7
    def test_trainer_respects_net_properties(self):
        iters = 77
        h_sizes = (4, )
        i_size = 17
        o_size = 44
        t = GeneticNetTrainer(
            iterations_per_model=iters,
            hidden_sizes=h_sizes,
            inputs=i_size,
            outputs=o_size,
        )
        sizes = (i_size, ) + h_sizes + (o_size, )

        def test_net(net, num_iterations):
            self.assertEqual(num_iterations, iters)
            self.assertEqual(net.sizes, sizes)
            return (0, )

        t.test_net = test_net
        t.run_model(None)
        t2 = GeneticNetTrainer(
            iterations_per_model=iters,
            hidden_sizes=h_sizes,
            weights=Net.random_weights(sizes),
            inputs=i_size,
            outputs=o_size,
        )
        t2.test_net = test_net
        t2.run_model(None)
Code example #8
def get_net_from_logs(logdir):
    """
    Return the best tf.keras model found in ``logdir``
    (best: best performance on the validation set out of all models in the logdir),
    used to choose optimum hyperparameters.

    Arguments:
        logdir (str): path to the logging folder of the model.
            The logging folder of a trained model contains:
            1) folder "Training", which contains logdirs
               of individual hyperparameter searches
            2) pickle "args.pickle", which contains a dict of the network args.
    """
    with open(os.path.join(logdir, "args.pickle"), "rb") as f:
        # load the args dict and convert it to a namespace
        args = argparse.Namespace(**pickle.load(f))
    analysis = tune.Analysis(os.path.join(logdir, "Training"))
    best_config = analysis.get_best_config(metric="valid_rmse", mode="min")
    best_logdir = analysis.get_best_logdir(metric="valid_rmse", mode="min")
    model = create_model(args=args, **best_config)
    checkpoint_folder_name = [
        s for s in os.listdir(best_logdir) if s.startswith("checkpoint")
    ][0]
    model.load_weights(
        os.path.join(best_logdir, checkpoint_folder_name, "model.h5"))
    return Net(model, args)
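
A hypothetical usage sketch; the path is illustrative and must point at a logdir laid out as the docstring describes (a "Training" folder plus "args.pickle").

best_net = get_net_from_logs("logs/my_experiment")  # hypothetical logdir path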
Code example #9
File: trainer.py Project: vithar7/2048
 def make_net_weights():
     # `self` is not a parameter: this helper relies on being defined inside a
     # trainer method so that `self` is available from the enclosing scope.
     return Net(
         hidden_sizes=self.net_hidden_sizes,
         weights=None,
         inputs=self.net_inputs,
         outputs=self.net_outputs,
         weight_spread=self.net_spread,
         weight_middle=self.net_middle).weights
Code example #10
 def test_square_can_process_input(self):
     hidden_weights = (
         numpy.array([[3.0, 2.0, 0.5], [3.0, 0.5, 2.0], [0.5, 3.0, 2.0]]),
         numpy.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]),
     )
     n = Net(hidden_sizes=(3, ),
             weights=hidden_weights,
             inputs=3,
             outputs=3)
     # Layer one:
     # 10.5 12 10.5
     # Layer two:
     # 33 33 33
     # Softmax:
     # 0.33 0.33 0.33
     data = numpy.array([1, 2, 3])
     output = n.run(data)
     for i in output:
         self.assertAlmostEqual(i, 0.333, 3)
Code example #11
    def test_can_make_nets(self):
        n = Net(hidden_sizes=(8, ), inputs=16, outputs=4)
        self.assertEqual(n.sizes, (16, 8, 4))
        self.assertEqual(len(n.weights), 2)
        self.assertEqual(n.weights[0].shape, (16, 8))
        self.assertEqual(n.weights[1].shape, (8, 4))

        self.assertRaises(ValueError, Net, 3)
        self.assertRaises(ValueError, Net, weights='haha')
        self.assertRaises(ValueError, Net, weights=(1, ))
        self.assertRaises(ValueError, Net, inputs='lol')
        self.assertRaises(ValueError, Net, outputs='rofl')
Code example #12
File: trainer.py Project: vithar7/2048
 def run_model(self,
               weights_test,
               weights_adversary,
               num_iterations=None,
               naive_player=None):
     """
     @return: (ModelScore_test, ModelScore_adversary) where each is a list
              of ModelScore tuples
              Or a single list of ModelScore tuples for the non-naive player
              if one is naive
     """
     num_iterations = num_iterations or self.iterations_per_model
     if naive_player == 1:
         n1 = weights_test()
     else:
         n1 = Net(
             hidden_sizes=self.net_hidden_sizes,
             weights=weights_test,
             inputs=self.net_inputs,
             outputs=self.net_outputs,
             weight_spread=self.net_spread,
             weight_middle=self.net_middle)
     if naive_player == 2:
         n2 = weights_adversary()
     else:
         n2 = Net(
             hidden_sizes=self.net_hidden_sizes,
             weights=weights_adversary,
             inputs=self.net_inputs,
             outputs=self.net_outputs,
             weight_spread=self.net_spread,
             weight_middle=self.net_middle)
     # We score both nets
     score_params = self.test_net(n1, n2, num_iterations)
     if naive_player is None:
         return score_params
     else:
         return score_params[naive_player - 1]
Code example #13
File: trainer.py Project: vithar7/2048
 def run_model(self, weights, weights_idx, num_iterations=None):
     """
     @return: A ModelStats tuple
     """
     num_iterations = num_iterations or self.iterations_per_model
     n = Net(hidden_sizes=self.net_hidden_sizes,
             weights=weights,
             inputs=self.net_inputs,
             outputs=self.net_outputs,
             weight_spread=self.net_spread,
             weight_middle=self.net_middle)
     score_params = self.test_net(n, num_iterations)
     return ModelStats(weights_idx=weights_idx,
                       score_parameters=score_params,
                       score=self.get_model_score(score_params))
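
A hypothetical single-model evaluation assembled from the vithar7/2048 snippets: `trainer` is assumed to be a configured GeneticNetTrainer (code example #16), and the random starting weights come from `Net.random_weights`, as in the trainer test (code example #7).

sizes = (trainer.net_inputs, ) + trainer.net_hidden_sizes + (trainer.net_outputs, )
stats = trainer.run_model(Net.random_weights(sizes), weights_idx=0)
print(stats.weights_idx, stats.score)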
Code example #14
def main(args):
    '''
    main function
    '''

    if args.logport:
        args.logport = Dashboard(args.logport, 'dashboard')

    EPOCH_SIZE = args.num_episode*args.num_query*args.way_train
    EPOCH_SIZE_TEST = args.num_episode_test*args.num_query*args.way_test


    '''define network'''
    net = Net(args.num_in_channel, args.num_filter)
    relationNet = RelationNet(args.num_filter*2, args.num_filter, 5*5*args.num_filter, args.num_fc, args.drop_prob)
    if torch.cuda.is_available():
        net.cuda()
        relationNet.cuda()


    '''
    load model if needed
    '''
    if args.model_load_path_net!='':
        net.load_state_dict(torch.load(args.model_load_path_net))
        net.cuda()
        relationNet.load_state_dict(torch.load(args.model_load_path_relationNet))
        relationNet.cuda()
        print('model loaded')


    ''' define loss, optimizer'''
    criterion = nn.MSELoss()
    params = list(net.parameters()) + list(relationNet.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)


    '''get data'''
    trainList = read_miniImageNet_pathonly(TESTMODE=False,
                                           miniImageNetPath='/home/fujenchu/projects/dataset/miniImageNet_Ravi/',
                                           imgPerCls=600)
    testList = read_miniImageNet_pathonly(TESTMODE=True,
                                          miniImageNetPath='/home/fujenchu/projects/dataset/miniImageNet_Ravi/',
                                          imgPerCls=600)

    scheduler = StepLR(optimizer, step_size=40, gamma=0.5)
    ''' training'''
    for epoch in range(1000):
        scheduler.step()

        running_loss = 0.0
        avg_accu_Train = 0.0
        accu_Test_stats = []

        net.train()
        relationNet.train()
        # epoch training list
        trainList_combo = Producer(trainList, args.way_train, args.num_episode, "training") # combo contains [query_label, query_path ]
        list_trainset = tnt.dataset.ListDataset(trainList_combo, loadImg)
        trainloader = list_trainset.parallel(batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True)

        for i, data in enumerate(tqdm(trainloader), 0):
        #for i, data in enumerate(trainloader, 0):

            # get inputs
            batchSize = data[0].size()[0]
            labels = torch.unsqueeze(data[0], 1)
            images = data[1:]
            images_all = torch.cat(images).permute(0, 3, 1, 2).float()

            labels_one_hot = torch.zeros(data[0].size()[0], args.way_train)
            labels_one_hot.scatter_(1, labels, 1.0)

            # wrap in Variable
            if torch.cuda.is_available():
                images_all, labels_one_hot = Variable(images_all.cuda()), Variable(labels_one_hot.cuda())
            else:
                images_all, labels_one_hot = Variable(images_all), Variable(labels_one_hot)

            # zero gradients
            optimizer.zero_grad()

            # forward + backward + optimizer
            feature_s_all_t0_p = net(images_all)
            feature_s_all_t0_p = torch.split(feature_s_all_t0_p, batchSize, 0)

            concatenatedFeat_list = [[] for _ in range(args.way_train)]
            for idx in range(args.way_train):
                concatenatedFeat_list[idx] = torch.cat((feature_s_all_t0_p[idx], feature_s_all_t0_p[-1]), 1)

            concatenatedFeat_all = torch.cat(concatenatedFeat_list, 0)
            relationScore_all = relationNet(concatenatedFeat_all)
            relationScore_list = torch.split(relationScore_all, batchSize, 0)
            relationScore = torch.cat(relationScore_list, 1)


            #loss = criterion(relationScore, labels_one_hot)
            weights = labels_one_hot.clone()
            weights[labels_one_hot == 0] = 1.0/(args.way_train)
            weights[labels_one_hot != 0] = (args.way_train-1.0)/(args.way_train)
            loss = weighted_mse_loss(relationScore, labels_one_hot, weights)/data[0].size()[0]
            loss.backward()
            optimizer.step()

            # summing up
            running_loss += loss.data[0]
            _, predicted = torch.max(relationScore.data, 1)
            labels = torch.squeeze(labels, 1)
            avg_accu_Train += (predicted == labels.cuda()).sum()
            if i % args.log_step == args.log_step-1:
                #print('[%d, %5d] train loss: %.3f  train accuracy: %.3f' % (epoch + 1, i + 1, running_loss / args.log_step, avg_accu_Train/(args.log_step*batchSize)))

                if args.logport:
                    args.logport.appendlog(running_loss / args.log_step, 'Training Loss')
                    args.logport.appendlog(avg_accu_Train/(args.log_step*batchSize), 'Training Accuracy')
                    args.logport.image((images[-1][0, :, :, :]).permute(2, 0, 1), 'query img', mode='img')
                    for idx in range(args.way_train):
                        args.logport.image((images[idx][0, :, :, :]).permute(2, 0, 1), 'support img', mode='img')

                running_loss = 0.0
                avg_accu_Train = 0.0

            if (i+1) % args.save_step == 0:
                torch.save(net.state_dict(),
                           os.path.join(args.model_path,
                                        'net-model-%d-%d.pkl' %(epoch+1, i+1)))
                torch.save(relationNet.state_dict(),
                           os.path.join(args.model_path,
                                        'relationNet-model-%d-%d.pkl' %(epoch+1, i+1)))


        net.eval()
        relationNet.eval()
        # epoch training list
        testList_combo = Producer(testList, args.way_test, args.num_episode_test, "testing") # combo contains [query_label, query_path ]
        list_testset = tnt.dataset.ListDataset(testList_combo, loadImg_testing)
        testloader = list_testset.parallel(batch_size=args.batch_size_test, num_workers=args.num_workers, shuffle=False)
        #for i, data in enumerate(tqdm(testloader), 0):
        for i, data in enumerate(testloader, 0):
            # get inputs
            batchSize = data[0].size()[0]

            labels = torch.unsqueeze(data[0], 1)
            images = data[1:]
            images_all = torch.cat(images).permute(0, 3, 1, 2).float()

            labels_one_hot = torch.zeros(batchSize, args.way_test)
            labels_one_hot.scatter_(1, labels, 1.0)

            # wrap in Variable
            if torch.cuda.is_available():
                images_all, labels_one_hot = Variable(images_all.cuda(), volatile=True), Variable(labels_one_hot.cuda(), volatile=True)
            else:
                images_all, labels_one_hot = Variable(images_all, volatile=True), Variable(labels_one_hot, volatile=True)

            # forward
            feature_s_all_t0_p = net(images_all)
            feature_s_all_t0_p = torch.split(feature_s_all_t0_p, batchSize, 0)

            concatenatedFeat_list = [[] for _ in range(args.way_test)]
            for idx in range(args.way_test):
                concatenatedFeat_list[idx] = torch.cat((feature_s_all_t0_p[idx], feature_s_all_t0_p[-1]), 1)

            concatenatedFeat_all = torch.cat(concatenatedFeat_list, 0)
            relationScore_all = relationNet(concatenatedFeat_all)
            relationScore_list = torch.split(relationScore_all, batchSize, 0)
            relationScore = torch.cat(relationScore_list, 1)


            _, predicted = torch.max(relationScore.data, 1)
            #avg_accu_Test += (predicted == torch.squeeze(labels, 1).cuda()).sum()
            accu_Test_stats.append((predicted == torch.squeeze(labels, 1).cuda()).sum()/float(batchSize))


        m, h = mean_confidence_interval(np.asarray(accu_Test_stats), confidence=0.95)
        print('[epoch %3d] test accuracy with 0.95 confidence: %.4f, +-: %.4f' % (epoch + 1, m, h))

        #avg_accu_Test = 0.0
        accu_Test_stats = []
Code example #15
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # TF 1.x tutorial helper

import Net  # project-local module that defines Conv_Net (assumed import)


def compute_accuracy(x_data, y_data):
    # Tail of the original helper: `accuracy` is a TF op defined earlier in the
    # file; the signature is inferred from the call at the bottom of the script.
    result = sess.run(accuracy, feed_dict={img_input: x_data})
    return result


mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

slim = tf.contrib.slim

# placeholder for the flattened input image
img_input = tf.placeholder(tf.float32, shape=(None, 784))
img_reshape = tf.reshape(img_input, shape=(-1, 28, 28, 1))

# create a session and build the model
sess = tf.Session()

Conv_Net = Net.Conv_Net()

prediction = Conv_Net.net(img_reshape)

# checkpoint to restore
ckpt_filename = './logs/model.ckpt-20000'

# initialize all variables
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()

# restore the trained weights
saver.restore(sess, ckpt_filename)

print(compute_accuracy(mnist.test.images, mnist.test.labels))
Code example #16
File: trainer.py Project: vithar7/2048
    def __init__(self,
                 # Genetic parameters
                 breeding_algorithm=DEFAULT_BREEDING_ALGORITHM,
                 generations=DEFAULT_GENERATIONS,
                 generation_cutoff=DEFAULT_GENERATION_CUTOFF,
                 generation_size=DEFAULT_GENERATION_SIZE,
                 mutation_chance=DEFAULT_MUTATION_CHANCE,
                 mutation_difference=DEFAULT_MUTATION_DIFFERENCE,
                 # Model parameters
                 iterations_per_model=DEFAULT_MODEL_ITERATIONS,
                 # Epoch parameters
                 epoch_num=DEFAULT_EPOCH_NUM,
                 epoch_size=DEFAULT_EPOCH_SIZE,
                 experiment_name=DEFAULT_EXPERIMENT_NAME,
                 **kwargs):
        if (not isinstance(generations, int) or
                not isinstance(generation_size, int) or
                generations < 1 or generation_size < 1):
            raise ValueError('Generations and generation size must be positive'
                             ' integers greater than zero')
        if (not isinstance(generation_cutoff, float) or
                generation_cutoff <= 0.0 or generation_cutoff >= 1.0):
            raise ValueError('Generation cutoff must be a positive float'
                             ' between 0.0 and 1.0 (exclusive)')

        if (not isinstance(breeding_algorithm, basestring) or
                breeding_algorithm not in self.BREEDING_ALGORITHM_CHOICES):
            raise ValueError('Unknown net breeding algorithm: {}'.format(
                breeding_algorithm))
        if (not isinstance(mutation_chance, float) or
                not isinstance(mutation_difference, float) or
                mutation_chance < 0.0 or mutation_chance > 1.0 or
                mutation_difference < 0.0 or mutation_difference > 1.0):
            raise ValueError('Mutation chance and mutation difference must be'
                             ' positive floats between 0.0 and 1.0'
                             ' (inclusive)')
        if (not isinstance(iterations_per_model, int) or
                iterations_per_model <= 0):
            raise ValueError('Number of iterations per model must be a positive'
                             ' integer')

        if (not isinstance(epoch_size, int) or not isinstance(epoch_num, int)
                or epoch_size <= 0 or epoch_num < 0):
            raise ValueError('Epoch size must be an integer greater than zero'
                             ' and epoch num must be a non-negative integer')
        if not experiment_name or not isinstance(experiment_name, basestring):
            raise ValueError('Experiment name must be a non-empty string')

        # Nets
        net_hidden_sizes = kwargs.pop('hidden_sizes', Net.DEFAULT_HIDDEN_SIZES)
        net_weights = kwargs.pop('weights', Net.DEFAULT_WEIGHTS)
        net_inputs = kwargs.pop('inputs', Net.DEFAULT_INPUTS)
        net_outputs = kwargs.pop('outputs', Net.DEFAULT_OUTPUTS)
        net_spread = kwargs.pop('weight_spread', Net.DEFAULT_WEIGHT_SPREAD)
        net_middle = kwargs.pop('weight_middle', Net.DEFAULT_WEIGHT_MIDDLE)
        # Test to ensure nothing breaks
        Net(hidden_sizes=net_hidden_sizes, weights=net_weights,
            inputs=net_inputs, outputs=net_outputs,
            weight_spread=net_spread, weight_middle=net_middle)

        # Debug variables
        self.debug = kwargs.pop('debug', None)
        self.get_model_score = kwargs.pop('score_algorithm',
                                          self.get_model_score)
        # If kwargs not empty, extra key words were passed
        if len(kwargs.keys()) > 0:
            raise ValueError('Unnecessary kwargs passed to GeneticNetTrainer:'
                             ' {}'.format(','.join(kwargs.keys())))

        # Genetic variables
        self.breeding_algorithm = breeding_algorithm
        self.generations = generations
        self.generation_size = generation_size
        self.generation_cutoff = generation_cutoff
        self.mutation_chance = mutation_chance
        self.mutation_difference = mutation_difference
        # Model variables
        self.iterations_per_model = iterations_per_model
        # Net variables
        self.net_hidden_sizes = net_hidden_sizes
        self.net_weights = net_weights
        self.net_inputs = net_inputs
        self.net_outputs = net_outputs
        self.net_spread = net_spread
        self.net_middle = net_middle
        # Epoch variables
        self.epoch_size = epoch_size
        self.epoch_num = epoch_num
        self.experiment_name = experiment_name
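
A hypothetical construction of the trainer above; every keyword satisfies the validation in __init__, and the values are illustrative rather than the project's defaults.

trainer = GeneticNetTrainer(
    generations=10,              # positive int
    generation_size=20,          # positive int
    generation_cutoff=0.5,       # float strictly between 0.0 and 1.0
    mutation_chance=0.05,        # float in [0.0, 1.0]
    mutation_difference=0.1,     # float in [0.0, 1.0]
    iterations_per_model=50,     # positive int
    hidden_sizes=(8, ),          # forwarded to Net, as in code example #11
    inputs=16,
    outputs=4,
    experiment_name='demo')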
Code example #17
 def test_nets_can_use_callable_weights(self):
     n = Net(weights=Net.random_weights)
     data = numpy.random.random((n.inputs, ))
     output = n.run(data)
     self.assertEqual(output.shape, (n.outputs, ))
Code example #18
 def test_default_can_process_input(self):
     n = Net()
     data = numpy.random.random((n.inputs, ))
     output = n.run(data)
     self.assertEqual(output.shape, (n.outputs, ))
Code example #19
def empty_network():
    return Net(D=1, H1=10, H2=20, class_count=10).cuda()
Code example #20
File: ttt.py Project: vithar7/2048
class Game(object):
    def __init__(self,
                 net_number=None,
                 epoch_number=None,
                 experiment_name=None,
                 use_naive_opponent=False,
                 play_solo=False,
                 **kwargs):
        self.board = BoardTTT(**kwargs)
        self.opponent = None
        if use_naive_opponent:
            self.opponent = NaiveTTTPlayer()
        elif (not play_solo and epoch_number is not None
              and experiment_name is not None and net_number is not None):
            from nets import Net
            from trainer import GeneticNetTrainer
            try:
                net_data = GeneticNetTrainer.load_single_net(
                    idx=net_number,
                    epoch_num=epoch_number,
                    experiment_name=experiment_name)
                # Hardcoding is hard
                net_hidden_sizes = (3, 3)
                # 2D Tic Tac Toe sizes
                net_inputs = 9
                net_outputs = 9
                self.opponent = Net(hidden_sizes=net_hidden_sizes,
                                    weights=net_data,
                                    inputs=net_inputs,
                                    outputs=net_outputs)
            except IOError:
                pass

    def run(self):
        last_move = None
        last_last_move = None
        player_to_move = 1
        try:
            while True:
                self.board.display()
                print("{} to move".format(
                    self.board.player_to_tile[player_to_move]))

                if player_to_move == 2 and self.opponent:
                    # The AI always sees itself as player one
                    data = self.board.normalize_board(player_to_move)
                    output = self.opponent.run(data)
                    print("{} to take {}".format(
                        self.board.player_to_tile[player_to_move],
                        output.argmax()))
                    if self.board.loop(output.argmax(),
                                       player_to_move,
                                       suppress_invalid=True):
                        # 2 player game
                        player_to_move = (player_to_move % 2) + 1
                    else:
                        print(
                            "The AI made an invalid move and probably can't"
                            " find a valid move. Sorry. :(")
                        self.board.game_over(invalid_move=True)
                    continue

                s = raw_input().strip().lower()
                is_number = False
                temp_move = -1
                try:
                    temp_move = int(s)
                    is_number = True
                except ValueError:
                    pass
                print("")
                if s.startswith('m'):
                    print("Moves: {}".format(self.board.valid_moves))
                    continue
                elif s.startswith('u'):
                    if self.board.undo():
                        last_move = last_last_move
                    continue
                elif s.startswith('q'):
                    self.board.game_over()
                elif (s.startswith('h') or not is_number):
                    print("Commands:")
                    print("- [H]elp (this menu)")
                    print("- [M]oves (# of moves)")
                    print("- [U]ndo (revert last move)")
                    print("- [Q]uit (leave game)")
                    print("- Type a number to place your symbol there")
                    continue

                if self.board.loop(temp_move, player_to_move):
                    last_last_move = last_move
                    last_move = temp_move
                    player_to_move = (player_to_move % 2) + 1
        except GameOver as go:
            self.board.display()
            print("Game over! {} wins".format(
                self.board.player_to_tile.get(int(str(go)), "Nobody")))
Code example #21
def main(args):
    '''
    main function
    '''

    if args.logport:
        args.logport = Dashboard(args.logport, 'dashboard')

    EPOCH_SIZE = args.num_episode * args.num_query * args.way_train
    EPOCH_SIZE_TEST = args.num_episode_test * args.num_query * args.way_test
    SM_CONSTANT = 50
    '''define network'''
    net = Net(args.num_in_channel, args.num_filter)
    if torch.cuda.is_available():
        net.cuda()
    '''
    load model if needed
    '''
    if args.model_load_path_net != '':
        net.load_state_dict(torch.load(args.model_load_path_net))
        net.cuda()
        print('model loaded')
    ''' define loss, optimizer'''
    criterion = nn.CrossEntropyLoss()
    params = list(net.parameters())
    optimizer = optim.Adam(params, lr=args.learning_rate)
    '''get data'''
    trainList = read_miniImageNet_pathonly(
        TESTMODE=False,
        miniImageNetPath='/media/fujenchu/data/miniImageNet_Ravi/',
        imgPerCls=600)
    testList = read_miniImageNet_pathonly(
        TESTMODE=True,
        miniImageNetPath='/media/fujenchu/data/miniImageNet_Ravi/',
        imgPerCls=600)

    scheduler = StepLR(optimizer, step_size=40, gamma=0.5)
    ''' training'''
    for epoch in range(1000):
        scheduler.step()

        running_loss = 0.0
        avg_accu_Train = 0.0
        accu_Test_stats = []

        net.train()
        # epoch training list
        trainList_combo = Producer(
            trainList, args.way_train, args.num_episode,
            "training")  # combo contains [query_label, query_path ]
        list_trainset = tnt.dataset.ListDataset(trainList_combo, loadImg)
        trainloader = list_trainset.parallel(batch_size=args.batch_size,
                                             num_workers=args.num_workers,
                                             shuffle=True)

        for i, data in enumerate(tqdm(trainloader), 0):
            #for i, data in enumerate(trainloader, 0):

            # get inputs
            batchSize = data[0].size()[0]
            labels = torch.unsqueeze(data[0], 1)
            images = data[1:]
            images_all = torch.cat(images).permute(0, 3, 1, 2).float()

            labels_one_hot = torch.zeros(data[0].size()[0], args.way_train)
            labels_one_hot.scatter_(1, labels, 1.0)

            # wrap in Variable
            if torch.cuda.is_available():
                images_all, labels = Variable(images_all.cuda()), Variable(
                    labels.cuda())
            else:
                images_all, labels = Variable(images_all), Variable(labels)

            # zero gradients
            optimizer.zero_grad()

            # forward + backward + optimizer
            feature_s_all_t0_p = net(images_all)
            feature_s_all_t0_p = torch.split(feature_s_all_t0_p, batchSize, 0)

            cosineDist_list = [[] for _ in range(args.way_train)]

            for idx in range(args.way_train):
                cosineDist_list[idx] = SM_CONSTANT * torch.sum(torch.mul(
                    feature_s_all_t0_p[-1].div(
                        torch.norm(
                            feature_s_all_t0_p[-1], p=2, dim=1,
                            keepdim=True).expand_as(feature_s_all_t0_p[-1])),
                    feature_s_all_t0_p[idx].div(
                        torch.norm(
                            feature_s_all_t0_p[idx], p=2, dim=1,
                            keepdim=True).expand_as(feature_s_all_t0_p[idx]))),
                                                               dim=1,
                                                               keepdim=True)
            cosineDist_all = torch.cat(cosineDist_list, 1)

            labels = labels.squeeze(1)
            loss = criterion(cosineDist_all, labels)
            loss.backward()
            optimizer.step()

            # summing up
            running_loss += loss.data[0]
            _, predicted = torch.max(cosineDist_all.data, 1)
            avg_accu_Train += (predicted == labels.data).sum()
            if i % args.log_step == args.log_step - 1:
                #print('[%d, %5d] train loss: %.3f  train accuracy: %.3f' % (epoch + 1, i + 1, running_loss / args.log_step, avg_accu_Train/(args.log_step*batchSize)))

                if args.logport:
                    args.logport.appendlog(running_loss / args.log_step,
                                           'Training Loss')
                    args.logport.appendlog(
                        avg_accu_Train / (args.log_step * batchSize),
                        'Training Accuracy')
                    args.logport.image(
                        (images[-1][0, :, :, :]).permute(2, 0, 1),
                        'query img',
                        mode='img')
                    for idx in range(args.way_train):
                        args.logport.image(
                            (images[idx][0, :, :, :]).permute(2, 0, 1),
                            'support img',
                            mode='img')

                running_loss = 0.0
                avg_accu_Train = 0.0

            if (i + 1) % args.save_step == 0:
                torch.save(
                    net.state_dict(),
                    os.path.join(args.model_path,
                                 'net-model-%d-%d.pkl' % (epoch + 1, i + 1)))

        net.eval()
        # epoch training list
        testList_combo = Producer(
            testList, args.way_test, args.num_episode_test,
            "testing")  # combo contains [query_label, query_path ]
        list_testset = tnt.dataset.ListDataset(testList_combo, loadImg_testing)
        testloader = list_testset.parallel(batch_size=args.batch_size_test,
                                           num_workers=args.num_workers,
                                           shuffle=False)
        #for i, data in enumerate(tqdm(testloader), 0):
        for i, data in enumerate(testloader, 0):
            # get inputs
            batchSize = data[0].size()[0]

            labels = torch.unsqueeze(data[0], 1)
            images = data[1:]
            images_all = torch.cat(images).permute(0, 3, 1, 2).float()

            labels_one_hot = torch.zeros(batchSize, args.way_test)
            labels_one_hot.scatter_(1, labels, 1.0)

            # wrap in Variable
            if torch.cuda.is_available():
                images_all, labels = Variable(
                    images_all.cuda(), volatile=True), Variable(labels.cuda(),
                                                                volatile=True)
            else:
                images_all, labels = Variable(
                    images_all, volatile=True), Variable(labels, volatile=True)

            # forward
            feature_s_all_t0_p = net(images_all)
            feature_s_all_t0_p = torch.split(feature_s_all_t0_p, batchSize, 0)

            cosineDist_list = [[] for _ in range(args.way_train)]

            for idx in range(args.way_train):
                cosineDist_list[idx] = SM_CONSTANT * torch.sum(torch.mul(
                    feature_s_all_t0_p[-1].div(
                        torch.norm(
                            feature_s_all_t0_p[-1], p=2, dim=1,
                            keepdim=True).expand_as(feature_s_all_t0_p[-1])),
                    feature_s_all_t0_p[idx].div(
                        torch.norm(
                            feature_s_all_t0_p[idx], p=2, dim=1,
                            keepdim=True).expand_as(feature_s_all_t0_p[idx]))),
                                                               dim=1,
                                                               keepdim=True)
            cosineDist_all = torch.cat(cosineDist_list, 1)

            _, predicted = torch.max(cosineDist_all.data, 1)
            accu_Test_stats.append(
                (predicted == torch.squeeze(labels, 1).data.cuda()).sum() /
                float(batchSize))

            equality = (predicted != torch.squeeze(labels, 1).data.cuda())
            equality_s = (predicted == torch.squeeze(labels, 1).data.cuda())
            equality_idx = equality.nonzero()
            equality_idx_s = equality_s.nonzero()

            if i % args.log_step == args.log_step - 1:
                if args.logport:
                    pred_np = predicted.cpu().numpy()
                    labels_np = labels.cpu().data.numpy()

                    batch_idx = equality_idx[0].cpu().numpy().astype(int)
                    bb = batch_idx[0]
                    args.logport.image(
                        (images[-1][bb, :, :, :]).permute(2, 0, 1),
                        np.array_str(labels_np[bb]) +
                        np.array_str(pred_np[bb]) + ' query img',
                        mode='img-test')
                    support_image = []
                    for idx in range(args.way_train):
                        support_image.append(images[idx][bb, :, :, :].permute(
                            2, 0, 1))
                    args.logport.image(torch.cat(support_image, 2),
                                       'support img',
                                       mode='img-test')

                    batch_idx = equality_idx_s[0].cpu().numpy().astype(int)
                    bb = batch_idx[0]
                    args.logport.image(
                        (images[-1][bb, :, :, :]).permute(2, 0, 1),
                        np.array_str(labels_np[bb]) +
                        np.array_str(pred_np[bb]) + ' query img',
                        mode='img-test')
                    support_image = []
                    for idx in range(args.way_train):
                        support_image.append(images[idx][bb, :, :, :].permute(
                            2, 0, 1))
                    args.logport.image(torch.cat(support_image, 2),
                                       'support img',
                                       mode='img-test')

        m, h = mean_confidence_interval(np.asarray(accu_Test_stats),
                                        confidence=0.95)
        print(
            '[epoch %3d] test accuracy with 0.95 confidence: %.4f, +-: %.4f' %
            (epoch + 1, m, h))

        #avg_accu_Test = 0.0
        accu_Test_stats = []
Code example #22
File: ttt.py Project: vhalis/2048
class Game(object):

    def __init__(
            self,
            net_number=None,
            epoch_number=None,
            experiment_name=None,
            use_naive_opponent=False,
            play_solo=False,
            **kwargs):
        self.board = BoardTTT(**kwargs)
        self.opponent = None
        if use_naive_opponent:
            self.opponent = NaiveTTTPlayer()
        elif (not play_solo
                and epoch_number is not None
                and experiment_name is not None
                and net_number is not None):
            from nets import Net
            from trainer import GeneticNetTrainer
            try:
                net_data = GeneticNetTrainer.load_single_net(
                    idx=net_number,
                    epoch_num=epoch_number,
                    experiment_name=experiment_name)
                # Hardcoding is hard
                net_hidden_sizes = (3, 3)
                # 2D Tic Tac Toe sizes
                net_inputs = 9
                net_outputs = 9
                self.opponent = Net(
                    hidden_sizes=net_hidden_sizes,
                    weights=net_data,
                    inputs=net_inputs,
                    outputs=net_outputs)
            except IOError:
                pass

    def run(self):
        last_move = None
        last_last_move = None
        player_to_move = 1
        try:
            while True:
                self.board.display()
                print("{} to move".format(
                    self.board.player_to_tile[player_to_move]))

                if player_to_move == 2 and self.opponent:
                    # The AI always sees itself as player one
                    data = self.board.normalize_board(player_to_move)
                    output = self.opponent.run(data)
                    print("{} to take {}".format(
                        self.board.player_to_tile[player_to_move],
                        output.argmax()))
                    if self.board.loop(
                            output.argmax(),
                            player_to_move,
                            suppress_invalid=True):
                        # 2 player game
                        player_to_move = (player_to_move % 2) + 1
                    else:
                        print("The AI made an invalid move and probably can't"
                              " find a valid move. Sorry. :(")
                        self.board.game_over(invalid_move=True)
                    continue

                s = raw_input().strip().lower()
                is_number = False
                temp_move = -1
                try:
                    temp_move = int(s)
                    is_number = True
                except ValueError:
                    pass
                print("")
                if s.startswith('m'):
                    print("Moves: {}".format(self.board.valid_moves))
                    continue
                elif s.startswith('u'):
                    if self.board.undo():
                        last_move = last_last_move
                    continue
                elif s.startswith('q'):
                    self.board.game_over()
                elif (s.startswith('h')
                        or not is_number):
                    print("Commands:")
                    print("- [H]elp (this menu)")
                    print("- [M]oves (# of moves)")
                    print("- [U]ndo (revert last move)")
                    print("- [Q]uit (leave game)")
                    print("- Type a number to place your symbol there")
                    continue

                if self.board.loop(temp_move, player_to_move):
                    last_last_move = last_move
                    last_move = temp_move
                    player_to_move = (player_to_move % 2) + 1
        except GameOver as go:
            self.board.display()
            print("Game over! {} wins".format(
                self.board.player_to_tile.get(int(str(go)),
                                              "Nobody")))