Example #1
    def build(self, optimizer='Adam'):
        # build encoder
        enc_input = Input(shape=self.input_shape)
        enc_nn = NN(self.encoder_network)
        enc_out = enc_nn.build(enc_input)
        code_mean = Dense(self.code_size)(enc_out)
        code_var = Dense(self.code_size)(enc_out)
        # Reparameterization trick: code = mean + exp(code_var) * noise
        code = Add()([
            code_mean,
            Multiply()(
                [Lambda(K.exp)(code_var),
                 Lambda(random_normal)(code_var)])
        ])
        # Penalty emitted as the encoder's second output:
        # square(code_mean) + exp(code_var) - code_var
        enc_obj = Subtract()([
            Add()([Lambda(K.square)(code_mean),
                   Lambda(K.exp)(code_var)]), code_var
        ])
        self.encoder = Model(enc_input, [code, enc_obj], name='encoder')

        # build decoder
        dec_input = Input(shape=[self.code_size])
        dec_nn = NN(self.decoder_network)
        dec_out = dec_nn.build(dec_input)
        self.decoder = Model(dec_input, dec_out, name='decoder')

        # compose VAE
        real_img = Input(shape=self.input_shape)
        z, z_obj = self.encoder(real_img)
        reconstruct_img = self.decoder(z)
        self.vae = Model(inputs=[real_img], outputs=[reconstruct_img, z_obj])
        self.vae.compile(optimizer=optimizer,
                         loss=[self.reconstruct_loss, self.encoding_loss],
                         loss_weights=self.loss_weights)
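
The compile call above references self.reconstruct_loss, self.encoding_loss, and a random_normal helper, none of which are shown. A minimal sketch of plausible forms, written as module-level functions (in the original they would be methods of the VAE class) and assuming the Keras backend; these are guesses, not the original implementations:

from keras import backend as K

def random_normal(tensor):
    # Standard-normal noise shaped like the code tensor, consumed by the
    # reparameterization step above (assumed implementation).
    return K.random_normal(shape=K.shape(tensor))

def reconstruct_loss(y_true, y_pred):
    # Pixel-wise reconstruction term, e.g. mean squared error (assumption).
    return K.mean(K.square(y_true - y_pred), axis=-1)

def encoding_loss(y_true, y_pred):
    # The encoder already emits its penalty as the second output (enc_obj),
    # so this loss just averages it; y_true is a dummy target (assumption).
    return K.mean(y_pred, axis=-1)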
Example #2
    def __init__(self, game, numData):
        self.n = game.n
        self.K = game.K
        self.seed = game.seed
        self.numData = numData

        self.Nstrat = self.K
        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 20
        self.nstep = 10
        self.lb = -1
        self.ub = 1

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)

        self.target = game.f
        self.CF_model = NN(self.K, 10, 20, True)
        self.GD_model = NN(self.K, 10, 20, True)
        self.historyCF = np.zeros(4)
        self.historyGD = np.zeros(4)

        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
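
The test set above is built by drawing random feature tensors, scoring them with the target model, and sampling one label per example from a softmax over the n alternatives. A self-contained sketch of the same recipe as a standalone helper, assuming target.forward maps a (batch, n, K) tensor to (batch, n) scores as in the snippet; the function name is illustrative:

import torch

def sample_dataset(target, n_samples, n, K):
    x = torch.randn(n_samples, n, K)                     # random feature tensors
    scores = target.forward(x)                           # one score per alternative
    probs = torch.nn.functional.softmax(scores, dim=1)   # choice probabilities
    y = torch.squeeze(torch.multinomial(probs, 1))       # one sampled label per example
    return x, y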
Example #3
    def __init__(self, nn=None):
        """ 
        Initialize blob by inheriting ParentSprite and assigning attributes

        Args:
            nn (class): can pass in the neural net from another blob
        """
        super(Blob, self).__init__()  #values are not needed
        self.int_center = int(self.center_x), int(self.center_y)
        self.radius = 10
        self.angle = random.uniform(0, 2 * np.pi)
        self.energy = MAX_ENERGY
        self.alive = True
        self.food_eaten = 0
        self.score_int = 0

        self.sight_angle = 10 * (np.pi / 180.)
        self.sight_radius = 1000

        self.target_blob = self
        self.target_food = self

        self.last_angle = .01

        #scoring related
        self.dist_moved = 0
        self.color = int(self.energy / 4 + 5)

        # Neural Network stuff here:
        if nn is not None:
            self.nn = NN(((1, nn), ))
        else:
            self.nn = NN()
Example #4
  def test_all(self, n):
    _dbn = DBN([784, 1000, 500, 250, 30], learning_rate=0.01, cd_k=1)
    _dbn.pretrain(mnist.train.images, 128, 50)

    _nnet = NN([784, 1000, 500, 250, 30, 250, 500, 1000, 784], 0.01, 128, 50)
    _nnet.load_from_dbn_to_reconstructNN(_dbn)
    _nnet.train(mnist.train.images, mnist.train.images)
    _nnet.test_linear(mnist.test.images, mnist.test.images)

    x_in = mnist.test.images[:30]
    _predict = _nnet.predict(x_in)
    _predict_img = np.concatenate(np.reshape(_predict, [-1, 28, 28]), axis=1)
    x_in = np.concatenate(np.reshape(x_in, [-1, 28, 28]), axis=1)
    img = Image.fromarray(
        (1.0-np.concatenate((_predict_img, x_in), axis=0))*255.0)
    img = img.convert('L')
    img.save(str(n)+'_.jpg')
    img2 = Image.fromarray(
        (np.concatenate((_predict_img, x_in), axis=0))*255.0)
    img2 = img2.convert('L')
    img2.save(str(n)+'.jpg')

    nnet_encoder = NN()
    nnet_encoder.load_layers_from_NN(_nnet, 0, 4)
    # feature = nnet_encoder.predict(mnist.test.images)
    nnet_decoder = NN()
    nnet_decoder.load_layers_from_NN(_nnet, 5, 8)
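
Once split, the two halves can presumably be chained back together. A hedged usage sketch, assuming predict() runs a forward pass through whatever layers were loaded (as the commented-out line above suggests):

codes = nnet_encoder.predict(mnist.test.images[:30])   # low-dimensional codes from the encoder half
recon = nnet_decoder.predict(codes)                    # reconstructions from the decoder half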
Example #5
 def __init__(self):
     self.env = gym.make("BreakoutNoFrameskip-v4")
     self.env = wrap_deepmind(self.env, frame_stack=True, scale=True)
     self.replay_size = 40
     # setup baseline
     # baseline will determine which replay pack the replay will be put into
     self.baseline = RecentAvg(size=HydrAI.HEADS_N *
                                    self.replay_size, init=0)
     self.baseline_mid = 0
     self.baseline_range = 0
     self.replays = {
         "good": ReplayPack(self.replay_size),
         "normal": ReplayPack(self.replay_size),
         "bad": ReplayPack(self.replay_size)
     }
     feature_size = self.env.observation_space.shape
     action_size = self.env.action_space.n
     self.nns = {
         "good": NN(feature_size, action_size,
                    [partial(self.replays["good"].sample, 32)],
                    "good_"),
         "normal": NN(feature_size, action_size,
                      [partial(self.replays["normal"].sample, 32)],
                      "normal_"),
         "bad": NN(feature_size, action_size,
                   [partial(self.replays["bad"].sample, 32)],
                   "bad_")
     }
     self.a = list(range(action_size))
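
Each NN above receives a zero-argument sampling callback built with functools.partial. A self-contained illustration of that pattern; the stub class is invented and only mirrors the sample(batch_size) interface assumed of ReplayPack:

from functools import partial

class ReplayPackStub:
    # Stand-in exposing the same sample(batch_size) interface assumed above.
    def __init__(self, data):
        self.data = data

    def sample(self, batch_size):
        return self.data[:batch_size]

pack = ReplayPackStub(list(range(100)))
sampler = partial(pack.sample, 32)   # zero-argument callable handed to the network
batch = sampler()                    # equivalent to pack.sample(32)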
Example #6
    def __init__(self, seed, n, K, K2, K3, target, numData):
        self.n = n
        self.K = K
        self.K2 = K2
        self.K3 = K3
        self.seed = seed
        self.numData = numData

        self.Nstrat = self.K
        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 20
        self.nstep = 10
        self.lb = -1
        self.ub = 1

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        self.target = target

        self.CF_model = NN(K, K2, K3, True)
        self.GD_model = NN(K, K2, K3, True)
        self.historyCF = np.zeros((self.numData.size, 4))
        self.historyGD = np.zeros((self.numData.size, 4))

        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
Example #7
 def learn(self):
     self.bound = self.getCFBound()
     for datasizei in range(self.numData.size):
         self.Ntrain = self.numData[datasizei]
         self.batch_size = int(self.Ntrain / self.nepoch)
         self.GD_model = NN(self.K, self.K2, self.K3, True)
         self.learnGD(datasizei)
         np.savetxt("historyGD"+str(self.n)+"_"+str(self.K)+"_"+str(self.seed)+".csv", self.historyGD, delimiter=',')
     for datasizei in range(self.numData.size):
         self.Ntrain = self.numData[datasizei]
         self.CF_model = NN(self.K, self.K2, self.K3, True)
         self.learnCF(datasizei)
         np.savetxt("historyCF"+str(self.n)+"_"+str(self.K)+"_"+str(self.seed)+".csv", self.historyCF, delimiter=',')
Example #8
    def __init__(self, game, residual_layers=5):
        """
        Args:
            game: A Game object
            residual_layers(int): number of residual layers. Default is 5
        """
        self.game = game
        input_shape = game.layers().shape
        policy_shape = len(game.action_space)

        self.nnet_1 = NN(input_shape, residual_layers, policy_shape, True)
        self.path_1 = './model/checkpoint/' + 'old.ckpt'
        self.nnet_2 = NN(input_shape, residual_layers, policy_shape, True)
        self.path_2 = './model/checkpoint/' + 'new.ckpt'
Example #9
 def __init__(self, health, speed, coords, dna):
     Creature.__init__(self, health, speed, coords)
     self.actions = []
     self.lifespan = 0
     self.score = 0
     self.action_nn = NN(2, 2, 2)
     self.move_nn = NN(2, 1, 2)
     if dna is None:
         self.move_nn.set_random_NN_weights()
         self.dna = self.move_nn.get_weights()
     else:
         print(dna)
         # The original called set_NN_weights() with no argument; passing the
         # inherited dna here is the presumed intent.
         self.move_nn.set_NN_weights(dna)
         self.dna = dna
Example #10
    def test_another_rbmtrain(self, n):
        _dbn = DBN([784, 1000, 500, 250, 30], learning_rate=0.01, cd_k=1)
        print(len(mnist.train.images))
        for j in range(5):
            for i in range(10):
                _dbn.pretrain(mnist.train.images[i * 5500:i * 5500 + 5500],
                              128, 5)

        _nnet = NN([784, 1000, 500, 250, 30, 250, 500, 1000, 784], 0.01, 128,
                   50)
        _nnet.load_from_dbn_to_reconstructNN(_dbn)
        _nnet.train(mnist.train.images, mnist.train.images)
        _nnet.test_linear(mnist.test.images, mnist.test.images)

        x_in = mnist.test.images[:30]
        _predict = _nnet.predict(x_in)
        _predict_img = np.concatenate(np.reshape(_predict, [-1, 28, 28]),
                                      axis=1)
        x_in = np.concatenate(np.reshape(x_in, [-1, 28, 28]), axis=1)
        img = Image.fromarray((1.0 - np.concatenate(
            (_predict_img, x_in), axis=0)) * 255.0)
        img = img.convert('L')
        img.save(str(n) + '_.jpg')
        img2 = Image.fromarray((np.concatenate(
            (_predict_img, x_in), axis=0)) * 255.0)
        img2 = img2.convert('L')
        img2.save(str(n) + '.jpg')
Example #11
File: utilities.py Project: fe9nman/AI
def GridSearch(epochs, trainloader, testloader, num_sample, input_dim,
               OUTPUT_DIM, HIDDEN_DIMS, LRS, L2_LAMBD):
    best_params = {}
    best_acc = -1
    for hidden_dim in HIDDEN_DIMS:
        for LR in LRS:
            for lambd in L2_LAMBD:
                model = NN(num_sample,
                           input_dim,
                           hidden_dim,
                           OUTPUT_DIM,
                           init_method='He')
                costs = model.train(trainloader, LR, lambd, epochs)
                acc = Accuracy(model.predict(testloader['X']), testloader['Y'])
                if acc > best_acc:
                    best_acc = acc
                    best_params['hidden_dim'] = hidden_dim
                    best_params['learning_rate'] = LR
                    best_params['L2_lambd'] = lambd
                    best_params['costs'] = costs
                    best_params['params'] = model.params
                print(
                    'GridSearching: Hidden_dim: {:d}, Learning Rate: {:f}, L2 lambda: {:f} ---> Accuracy: {:f}.'
                    .format(hidden_dim, LR, lambd, acc))
    return best_params
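
A hypothetical invocation of GridSearch; the toy arrays, the dict-style loader format, and the grid values are placeholders rather than anything from the original project:

import numpy as np

X_train = np.random.rand(200, 20)                 # toy stand-ins, shapes only
Y_train = np.random.randint(0, 10, size=200)
X_test = np.random.rand(50, 20)
Y_test = np.random.randint(0, 10, size=50)

best = GridSearch(epochs=50,
                  trainloader={'X': X_train, 'Y': Y_train},   # assumed loader format
                  testloader={'X': X_test, 'Y': Y_test},
                  num_sample=X_train.shape[0],
                  input_dim=X_train.shape[1],
                  OUTPUT_DIM=10,
                  HIDDEN_DIMS=[64, 128, 256],
                  LRS=[0.1, 0.01],
                  L2_LAMBD=[0.0, 0.001])
print(best['hidden_dim'], best['learning_rate'], best['L2_lambd'])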
Example #12
def main():
    train_images, train_labels, test_images, test_labels = load_mnist()
    X = normalize(train_images)
    label_size = len(np.unique(train_labels))
    y = one_hot_vector(train_labels, label_size)

    print("Total training example:", X.shape[0])

    nn = NN(epoch=20, batch_size=256)

    nn.add_layer(Layer(784))
    nn.add_layer(Layer(200, activation_fn=relu))
    nn.add_layer(Layer(100, activation_fn=relu))
    nn.add_layer(Layer(10, activation_fn=softmax))

    nn.fit(X, y)

    print("Train Accuracy is:", nn.accuracy(X, y))

    X_test = normalize(test_images)
    Y_test = one_hot_vector(test_labels, label_size)
    print("Test Accuracy is:", nn.accuracy(X_test, Y_test))

    nn.plot_learning_curve()
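
The preprocessing helpers used above are not shown; plausible minimal forms (assumptions, not the original implementations):

import numpy as np

def normalize(images):
    # Scale raw pixel values into [0, 1].
    return images.astype(np.float32) / 255.0

def one_hot_vector(labels, label_size):
    # One row per label, with a 1.0 at the label's index.
    out = np.zeros((len(labels), label_size), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out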
Example #13
File: main.py Project: VNGResearch/doc2vec
    def __init__(self, doc2vec):
        super(NNClassifier, self).__init__(doc2vec)

        self.nn_des = {
            'layer_description': [
                {
                    'name': 'input',
                    'unit_size': 100,
                },
                {
                    'name': 'hidden1',
                    'active_fun': tf.nn.relu,
                    'unit_size': 400,
                },
                {
                    'name': 'output',
                    'active_fun': None,
                    'unit_size': 59,
                },
            ],
        }
        self.max_pass = 5000
        self.batch_size = 10000
        self.step_to_report_loss = 5
        self.step_to_eval = 10
        self.nn_model = NN(self.nn_des)
        self.learning_rate = 0.01
Example #14
    def __init__(self, game, numData):
        self.n = game.n
        self.K = game.K
        self.K2 = game.K2
        self.K3 = game.K3
        self.seed = game.seed
        self.numData = numData

        self.Ntest = 500000
        self.lr = 1e-1
        self.nepoch = 30
        self.nstep = 30
        self.lb = -1
        self.ub = 1

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        self.target = copy.deepcopy(game.f)
        self.target.train = True
        self.learn_model = NN(game.K, game.K2, game.K3, True)
        self.history = np.zeros(4)

        self.xTest = torch.randn(self.Ntest, self.n, self.K)
        testScore = self.target.forward(self.xTest)
        testProb = torch.nn.functional.softmax(testScore, dim=1)
        self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
Example #15
def study_ppal_components(n_training_img, k_ppal_components):
    """
    Show the principal components of the NN
    :param n_training_img: Number of training images per person to use
    :param k_ppal_components: Number of principal components to use in the NN
    """
    train_img, train_labels, test_img, test_labels = load_images(
        n_training_img)
    nearest_neighbor = NN()
    nearest_neighbor.train(train_img, train_labels, k_ppal_components)

    sqrt = math.sqrt(k_ppal_components)
    rows = math.ceil(sqrt)  # integer grid size; plt.subplot needs ints
    i = 0
    for eigenface in nearest_neighbor.eigenfaces:
        i += 1
        if i > rows * int(sqrt):
            break
        plt.subplot(int(sqrt), rows, i)
        plt.imshow(shape_image(eigenface), cmap="gray")
        plt.xticks([])
        plt.yticks([])

    plt.subplots_adjust(wspace=0, hspace=0)
    # plt.suptitle(f'Eigenvectors. {n_training_img} training images, {k_ppal_components} eigenfaces')

    # plt.title("Eigenfaces used")
    plt.show()
Example #16
    def eat_food(self, model):
        """ 
        tests whether or not a blob eats food on a given frame. If a blob 
        eats food, remove the food, increase the blob's energy, asexually 
        reproduce based on its neural net dna, and do some population control.

        Args:
            model (object): contains attributes of the environment

        """
        for i in range(len(model.foods) - 1, -1, -1):
            f = model.foods[i]
            if self.intersect(f):
                self.food_eaten += 1
                self.energy += 500

                if self.energy > MAX_ENERGY:
                    self.energy = MAX_ENERGY

                del model.foods[i]

                model.foods.append(Food())

                model.blobs.append(Blob(NN([(1, self.nn)])))

                if len(model.blobs) > BLOB_NUM:
                    energy_list = []
                    for blob in model.blobs:
                        energy_list.append(blob.energy)
                    del model.blobs[np.argmin(energy_list)]
Example #17
    def __init__(self, seed, n, K):
        self.n = n
        self.K = K
        self.Kd = int(K * 2 / 3)
        self.Kc = int(K * 1 / 3)
        self.seed = seed
        np.random.seed(seed)
        self.eps = 1e-8
        self.CfeatureWeights = np.random.rand(self.Kc) - 0.5
        self.DfeatureWeights = np.random.rand(self.Kd) - 0.5
        for k in range(self.Kc):
            if self.CfeatureWeights[k] == 0:
                self.CfeatureWeights[k] = self.eps
        for k in range(self.Kd):
            if self.DfeatureWeights[k] == 0:
                self.DfeatureWeights[k] = self.eps
        self.nodes = [
            Node(K, self.CfeatureWeights, self.DfeatureWeights, seed)
            for i in range(n)
        ]
        self.us = np.array([node.u for node in self.nodes])
        maxCost = 0
        for i in range(n):
            maxCost += self.nodes[i].getMaxCost()
        self.budget = np.random.rand() * maxCost * 0.2

        self.f = NN(K, 10, 20, False)
        self.allWeights = np.concatenate(
            [self.CfeatureWeights, self.DfeatureWeights])
        self.f.input_linear.weight = torch.nn.Parameter(
            torch.unsqueeze(torch.tensor(self.allWeights, dtype=torch.float),
                            dim=0))
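
The last assignment above plants fixed weights into the network's first linear layer; the weight matrix of a torch.nn.Linear has shape (out_features, in_features), hence the unsqueeze to (1, K). A self-contained illustration of the same pattern with made-up sizes:

import torch

linear = torch.nn.Linear(6, 1, bias=False)
fixed = torch.tensor([[0.3, -0.1, 0.7, 0.2, -0.4, 0.05]])   # shape (1, 6)
linear.weight = torch.nn.Parameter(fixed)
print(linear(torch.randn(2, 6)).shape)   # torch.Size([2, 1])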
Example #18
File: lab4.py Project: znifer/archive
    def __init__(self):
        QMainWindow.__init__(self)
        loadUi('mainwindow.ui', self)
        self.inputLetter.clicked.connect(self.showInputWidget)
        # Number of rows and columns in the input grid
        self.inputSize = 5
        self.hiddenLayerSize = 12
        self.outputSize = 5
        self.iterations = 1000
        self.lr = 0.3
        self.data = Data()
        self.data.generate_symbols(4, 1)

        self.inputtedLetter = [0 for i in range(self.inputSize ** 2)]

        self.nn = NN(self.inputSize ** 2,
                     self.outputSize,
                     self.hiddenLayerSize,
                     self.iterations,
                     self.data,
                     self.lr
                     )
Example #19
 def __init__(self, nn):
     if isinstance(nn, OrderedDict):
         self._nn = NN(nn['player'])
     else:
         self._nn = nn
     self._x = np.zeros(shape=(NX, ))
     self._epsSame = 1e-2
     self._rand = random.Random()
Example #20
 def decode(genome):
     dim = [784]
     for neurons in genome['nb_neurons']:
         dim.append(neurons)
     dim.append(10)
     rnd = random.randint(0, 2)
     model = NN(genome['activation'][rnd], dim)
     return model
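
An illustrative genome for decode(); the keys match what the function reads, the concrete values are invented. Note that random.randint(0, 2) assumes the activation list holds at least three entries:

genome = {
    'nb_neurons': [256, 128, 64],                # hidden layer widths
    'activation': ['relu', 'tanh', 'sigmoid'],   # one entry is picked at random
}
model = decode(genome)   # NN built over layer sizes [784, 256, 128, 64, 10]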
Example #21
File: driver.py Project: Jmmxp/ml-rps
def main():
    playTournament(
        numOfGames=200,
        players=[NN(),
                 Rotate(),
                 BeatPrevious(),
                 Probability(),
                 Conditional()])
Example #22
    def test_conv_in_computational_graph(self):
        nn = NN(3)
        for param in nn.parameters():
            assert param.requires_grad

        afl = AffineCouplingLayer(3)
        for param in afl.parameters():
            assert param.requires_grad
Example #23
 def __init__(self, config, vocabulary):
     self.config = config
     self.vocabulary = vocabulary
     self.is_train = config.phase == 'train'
     self.nn = NN(config)
     self.global_step = tf.Variable(0, name='global_step', trainable=False)
     self.encode_state1, self.encode_state2 = None, None
     self.build()
Example #24
 def __init__(self, config):
     self.config = config
     self.is_train = config.phase == 'train'
     self.train_cnn = self.is_train and config.train_cnn
     self.image_loader = ImageLoader()
     self.image_shape = [224, 224, 3]
     self.nn = NN(config)
     self.global_step = tf.Variable(0, name='global_step', trainable=False)
     self.build()
Example #25
 def __init__(self, config):
     self.config = config
     self.is_train = config.phase == 'train'
     self.image_shape = [
         config.batch_size, config.time_step, config.fearute_size
     ]  # input shape
     self.nn = NN(config)  # base CNN unit
     self.global_step = tf.Variable(0, name='global_step', trainable=False)
     self.build()  # run the build method
Example #26
def train_step():
    mnist = input_data.read_data_sets("{}/data".format(
        os.path.abspath(os.path.dirname(__file__))),
                                      one_hot=True)
    config = Config()
    eval_config = Config()
    eval_config.keep_prob = 1.0
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=gpu_config) as session:
        model = NN(config, is_training=True)
        valid_model = NN(eval_config, is_training=False)
        tf.global_variables_initializer().run()
        step = 0
        train_loss = 0
        for i in range(config.epochs):
            xs, ys = mnist.train.next_batch(config.batch_size)
            session.run(model.optimizer, feed_dict={model.x: xs, model.y_: ys})
            step += 1
            train_loss += session.run(model.loss,
                                      feed_dict={
                                          model.x: xs,
                                          model.y_: ys
                                      })
            if step % 300 == 0:
                print("After {0} training steps, loss is {1}".format(
                    step, train_loss / step))
        print("The training accuracy is %.4f\n" %
              session.run(model.accuracy,
                          feed_dict={
                              model.x: mnist.train.images,
                              model.y_: mnist.train.labels
                          }))

        model.keep_prob = 1.0
        test_acc = session.run(model.accuracy,
                               feed_dict={
                                   model.x: mnist.test.images,
                                   model.y_: mnist.test.labels
                               })
        print(
            "After {0} training steps, test accuracy using average model is {1}"
            .format(eval_config.epochs, round(test_acc, 3)))
Example #27
def run():
    global model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Create dataset
    template_dataset = TemplateDataset(config)
    training_loader, validation_loader, test_loader = template_dataset.get_loaders(
    )

    # Create the neural network
    model = NN(net, optimizer, loss_function, lr_scheduler, metric, device,
               config).to(device)

    # Create the data handler
    data_handler = DataHandler(training_loader, validation_loader, test_loader)

    for epoch in range(config['epochs']):
        # Training
        model.train()
        for i, data in enumerate(training_loader, 0):
            x, y = data
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            loss = model.backpropagate(y_hat, y)
            result = model.evaluate(y_hat, y)
            data_handler.train_loss.append(loss)
            data_handler.train_metric.append(result)

        with torch.no_grad():
            model.eval()
            # Validating
            if validation_loader is not None:
                for i, data in enumerate(validation_loader, 0):
                    x, y = data
                    x, y = x.to(device), y.to(device)
                    y_hat = model(x)
                    _, loss = model.calculate_loss(y_hat, y)
                    result = model.evaluate(y_hat, y)
                    data_handler.valid_loss.append(loss)
                    data_handler.valid_metric.append(result)

            # Testing
            if test_loader is not None:
                for i, data in enumerate(test_loader, 0):
                    x, y = data
                    x, y = x.to(device), y.to(device)
                    y_hat = model(x)
                    _, loss = model.calculate_loss(y_hat, y)
                    result = model.evaluate(y_hat, y)
                    data_handler.test_loss.append(loss)
                    data_handler.test_metric.append(result)

        model.lr_scheduler_step()
        data_handler.epoch_end(epoch, model.get_lr())
    data_handler.plot(loss=config['plot']['loss'],
                      metric=config['plot']['metric'])
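
run() reads only a few keys from config directly; a plausible minimal shape for that dictionary, inferred from the usages in the function (the same object is also handed to TemplateDataset and NN, which presumably expect further keys):

config = {
    'epochs': 10,                              # length of the outer training loop
    'plot': {'loss': True, 'metric': True},    # forwarded to data_handler.plot(...)
}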
Example #28
def createInitialPopulation():
    from game import Player

    for i in range(count):
        network = NN(2, 10, 2, i)
        netArray.append(network)
        p = Player(network)
        players.append(p)

    crea = True
Example #29
    def __init__(self):
        self.maxdepth = 4
        self.moves = ["up", "right", "down", "left"]
        self.nn = NN([16, 4])

        self.weightFile = open("2048_nn_weights.pysave", "wb+")
        if os.stat("2048_nn_weights.pysave").st_size > 0:
            self.nn.loadWeights(self.weightFile)

        signal.signal(signal.SIGINT, self.signal_handler)
Example #30
def main():

    training_data, test_data, output_rsts = load_data()

    #######################################
    # Training
    #######################################
    input_layer_size = len(training_data[0][0])
    net = NN((input_layer_size, 30, 2), output_rsts)

    net.SGD(training_data, mini_batch_size=10, epochs=30, eta=3.0, test_data=test_data)