Пример #1
0
def main(args):
    """Train and evaluate a graph-classification model on Letter-low.

    Expects ``args`` to provide: config_fpath, gpu (negative = CPU),
    batch_size, lr, weight_decay, n_epochs, eval_every.
    Side effects: trains ``model`` in place, prints progress, and
    selects the CUDA device when ``args.gpu >= 0``.
    """
    config_params = read_params(args.config_fpath)

    # A negative GPU id means "run on CPU".
    cuda = args.gpu >= 0
    if cuda:
        torch.cuda.set_device(args.gpu)

    print('*** Create data loader ***')
    dataloader, val_dataloader, test_dataloader = make_data_loader(
        args.batch_size, dataset_name='Letter-low', cuda=cuda)

    print('*** Create model ***')
    model = Model(config=config_params, verbose=True, cuda=cuda)
    if cuda:
        model.cuda()

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    # loss function
    loss_fcn = torch.nn.CrossEntropyLoss()

    # Start training
    print('*** Start training ***')
    step = 0
    model.train()
    # NOTE(review): losses is never cleared, so the reported "Train loss"
    # is a running mean over ALL steps so far, not since the last eval —
    # confirm this is the intended reporting window.
    losses = []
    for epoch in range(args.n_epochs):
        # Was `for iter, (graphs, labels) in enumerate(...)`: the index
        # shadowed the builtin `iter` and was never used.
        for graphs, labels in dataloader:

            # forward pass
            logits = model(graphs)

            # compute loss
            loss = loss_fcn(logits, labels)
            losses.append(loss.item())

            # backpropagate
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # periodic validation
            step += 1
            if step % args.eval_every == 0:
                val_loss, val_acc = test(val_dataloader, model, loss_fcn)
                print(
                    "Step {:05d} | Train loss {:.4f} | Over {} | Val loss {:.4f} |"
                    "Val acc {:.4f}".format(
                        step,
                        np.mean(losses),
                        len(losses),
                        val_loss,
                        val_acc,
                    ))
                # test() presumably switches the model to eval mode;
                # restore training mode before continuing.
                model.train()

    print('*** Start Testing ***')
    test_loss, test_acc = test(test_dataloader, model, loss_fcn)
    print("Test loss {:.4f} | Test acc {:.4f}".format(test_loss, test_acc))
Пример #2
0
class Classifier(object):
    """Thin train/test wrapper around a Theano-backed ``Model``.

    ``dataset`` is a pair ``((train_x, train_y), (test_x, test_y))``.
    Training runs the model over 16 mini-batches per epoch and saves the
    learned parameters via ``saveModel``.
    """

    def __init__(self, dataset):
        # self.imagegen = ImageDataGenerator(shear_range=0.2,zoom_range=0.2,horizontal_flip=True)
        self.dataset = dataset
        self.train_x, self.train_y = dataset[0]
        self.test_x, self.test_y = dataset[1]
        self.learning_rate = 0.16
        self.eps = 2e-9          # small constant, presumably for numerical stability
        self.params = {}
        self.model = Model()

    def error(self, Y_actual, Y_output):
        """Return the mean squared error between ``Y_actual`` and ``Y_output``.

        BUGFIX: the original computed ``np.sum(Y * Y.T)``; for 2-D column
        vectors that is the sum of an (n, n) outer product, i.e.
        ``(sum Y)**2`` rather than the sum of squared residuals.
        Elementwise squaring is correct for both 1-D and 2-D inputs
        (and a stray debug ``print(Y)`` was removed).
        """
        Y = Y_actual - Y_output
        return np.sum(Y * Y) / Y.shape[0]

    def compute_gradients(self, cost, parameters):
        """Return symbolic gradients of ``cost`` w.r.t. each model parameter.

        NOTE(review): W4/b4 and b5/b6 are absent here — presumably those
        layers don't exist or have no bias in the model; verify against
        ``Model``'s parameter dict.
        """
        grads = {}
        grads["dW1"] = T.grad(cost, parameters["W1"])
        grads["db1"] = T.grad(cost, parameters["b1"])
        grads["dW2"] = T.grad(cost, parameters["W2"])
        grads["db2"] = T.grad(cost, parameters["b2"])
        grads["dW3"] = T.grad(cost, parameters["W3"])
        grads["db3"] = T.grad(cost, parameters["b3"])
        grads["dW5"] = T.grad(cost, parameters["W5"])
        grads["dW6"] = T.grad(cost, parameters["W6"])
        return grads

    def train(self):
        """Train the model for one epoch over 16 mini-batches, then save params.

        BUGFIX: ``current_data`` was used before assignment, raising
        NameError on the very first batch; it is now initialised to 0.
        (An unused ``parameters = None`` local was also dropped.)
        """
        num_epoch = 1
        current_data = 0
        while num_epoch > 0:
            # mini_batch_iter = self.imagegen.flow(self.train_x,self.train_y,batch_size=16)
            train_batch_inputs = np.array_split(self.train_x, 16)
            train_batch_labels = np.array_split(self.train_y, 16)
            for batch_x, batch_y in zip(train_batch_inputs, train_batch_labels):
                cost = self.model.train(batch_x, batch_y)
                if current_data % 100 == 0:
                    print(str(current_data*16)+" datas trained")
                if current_data == len(self.train_x):
                    break
                current_data += 1
            # error = self.model.test(self.test_x.astype(theano.config.floatX),self.test_y)
            num_epoch -= 1
        self.params = self.model.parameters()
        saveModel(self.params)

    def test(self, parameters=None):
        """Return the model's error on the held-out test split.

        ``parameters`` is accepted for interface compatibility but unused.
        """
        error = self.model.test(self.test_x.astype(theano.config.floatX), self.test_y)
        return error

    def cost_function(self, y_predict, y_label):
        """Bernoulli log-likelihood of ``y_label`` under ``y_predict``.

        NOTE(review): this is the POSITIVE log-likelihood; the usual
        cross-entropy cost is its negation — confirm callers expect this sign.
        """
        return y_label*np.log(y_predict) + (1-y_label)*np.log(1-y_predict)