Example #1
    def __init__(
        self,
        s_dim,
        a_num,
        device,
        hidden,
        lr,
        gamma,
    ):
        # Parameter Initialization
        self.s_dim = s_dim
        self.a_num = a_num
        self.device = device
        self.hidden = hidden
        self.lr = lr
        self.gamma = gamma

        # network initialization
        self.net = Net(s_dim, hidden, a_num).to(self.device)
        self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)

        # the memory only needs to store one trajectory
        self.memory_s = []
        self.memory_a = []
        self.memory_r = []
Example #2
def main():
    # hyperparams
    epochs = 1000
    lr = 0.02
    ls = 128
    size = 1000

    # collect data and split with sklearn
    # data types: "moons", "multi", "diabetes", "digit"
    data_type = "digit"
    features, labels = get_data(size, data_type)
    one_hot_target = pd.get_dummies(labels)
    train_x, x_val, train_y, y_val = train_test_split(features, one_hot_target, test_size=0.1, random_state=20)
    train_y = np.array(train_y)
    y_val = np.array(y_val)

    # training
    model = Net(train_x, train_y, epochs, ls, lr)
    model.train()

    # testing
    if data_type != "digit":
        plt.subplot(2,1,1)
        plt.title('Training Batch')
    print("Training accuracy: ", test(model, train_x, train_y, data_type))
    if data_type != "digit":
        plt.tight_layout(pad=3.0)
        plt.subplot(2,1,2)
    print("Test accuracy: ", test(model, x_val, np.array(y_val), data_type))
    if data_type != "digit":
        plt.title('Testing Batch')
        plt.show()
Example #3
 def main(self):
     # torch.cuda.manual_seed(self.config.SEED)
     model = Net()
     # train on the GPU
     model.cuda()
     # train for self.config.EPOCHS epochs
     for e in range(self.config.EPOCHS):
         self.train(model)
         self.test(model)
     self.saveModel(model, 'lianzheng_mnist.pth')
     self.saveONNX(model, 'lianzheng_mnist.onnx')
     return model
Example #4
def GetNet(mes):
    # split the description string into layer sizes (numbers) and activation names (words)
    layer = []
    act = []
    for i in mes.split():
        if ('a' <= i[0] <= 'z') or ('A' <= i[0] <= 'Z'):
            act.append(i)
        else:
            layer.append(int(i))
    # build one Layer per activation, connecting consecutive layer sizes
    net = Net()
    for i in range(len(act)):
        net.addLayer(Layer(layer[i], layer[i + 1], act[i]))
    return net
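
A quick usage sketch, hedged: it assumes the Net/Layer API shown above, where Layer takes an input size, an output size, and an activation name; the activation names below are only illustrative. The description string alternates layer sizes and activations, so three sizes and two activation names yield a two-layer network:

# Illustrative call; "relu"/"softmax" are assumed to be valid activation names for Layer.
net = GetNet("784 relu 128 softmax 10")
# parses to layer = [784, 128, 10], act = ["relu", "softmax"]
# and builds Layer(784, 128, "relu") followed by Layer(128, 10, "softmax")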
Example #5
def main():
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    torch.manual_seed(args.seed)

    # Fetch Some Data...
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data',
                       train=True,
                       download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data',
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
Example #6
def train(flags=FLAGS, hps=HPS):
    from Network import Net
    net = Net(flags, hps)

    for g in range(flags.global_epoch):
        with timer(f'Global epoch #{g}'):
            logger.debug(f'Start global epoch {g}')
            net.train(porportion=0.01)
            l, acc = net.test(porportion=0.1)
            logger.debug(f'Finish global epoch {g}')

    net.save_model(name=f'{l:.4f}-{acc:.4f}')
    logger.info('All done')
Example #7
 def loadModel(self, filepath):
     '''
     Load the saved weight parameters.
     :param filepath: path to the saved state dict
     :return: the model with the loaded weights
     '''
     model = Net()
     model.cuda()
     model.load_state_dict(torch.load(filepath))
     return model
Example #8
def test(flags=FLAGS, hps=HPS):
    from Network import Net
    import numpy as np
    net = Net(flags, hps)
    load_model_path = './savedmodels/model-0.2513-0.9609.ckpt-1000'
    net.restore_model(load_model_path)
    for i in range(10):
        # keep only one randomly chosen digit capsule (with a random activation vector) and reconstruct from it
        masked_digits = np.zeros((10, 16))
        digit = np.random.randint(10)
        masked_digits[digit] = np.random.random(16)
        net.reconstruct_img(masked_digits)
    print("Dataset: " + conf['data_path'])
    print("Model: " + conf['model_path'])

    print("Evaluating with " + data_type + " a " + complexity + " " +
          net_type + " model")

    # Load data
    if data_type == "Functions_dataset":
        parameters, test_set = func_utils.read_function_data(conf['data_path'])
        gap = float(parameters[0][3])
        dim = None

        print('Putting the test data into the right shape...')
        testX, testY = func_utils.reshape_function_data(test_set)

        to_test_net = Net.Mlp(model_file=conf['model_path'], framework="keras")

    elif data_type == "Vectors_dataset":
        parameters, test_set = vect_utils.read_vector_data(conf['data_path'])
        gap = parameters.iloc[0]['gap']
        dim = None

        print('Putting the test data into the right shape...')
        testX, testY = vect_utils.reshape_vector_data(test_set)
        if net_type == "NOREC":
            to_test_net = Net.Convolution1D(model_file=conf['model_path'],
                                            framework="keras")
        else:
            to_test_net = Net.Lstm(model_file=conf['model_path'],
                                   framework="keras")
Example #10
class PolicyGradient:
    def __init__(
        self,
        s_dim,
        a_num,
        device,
        hidden,
        lr,
        gamma,
    ):
        # Parameter Initialization
        self.s_dim = s_dim
        self.a_num = a_num
        self.device = device
        self.hidden = hidden
        self.lr = lr
        self.gamma = gamma

        # network initialization
        self.net = Net(s_dim, hidden, a_num).to(self.device)
        self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)

        # the memory only needs to store one trajectory
        self.memory_s = []
        self.memory_a = []
        self.memory_r = []

    def get_action(self, s):
        s = torch.FloatTensor(s).to(self.device)
        prob_weights = self.net(s)
        # sample an action from the predicted action probabilities
        dist = Categorical(prob_weights)
        action = (dist.sample()).detach().item()
        return action

    def store_transition(self, s, a, r):
        self.memory_s.append(s)
        self.memory_a.append(a)
        self.memory_r.append(r)

    def learn(self):
        discounted_r = self._discounted_r(self.memory_r)
        s = torch.FloatTensor(self.memory_s).to(self.device)
        a = torch.LongTensor(self.memory_a).to(self.device)
        r = torch.FloatTensor(discounted_r).to(self.device)
        # calculate loss
        prob = self.net(s)
        dist = Categorical(prob)
        loss = -torch.sum(dist.log_prob(a) * r)
        # train on episode
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        # empty episode data
        self.memory_s = []
        self.memory_a = []
        self.memory_r = []

    def _discounted_r(self, r):
        length = len(r)
        discounted_r = np.zeros(length)
        running_add = 0
        for t in range(length - 1, -1, -1):
            running_add = r[t] + running_add * self.gamma
            discounted_r[t] = running_add
        # normalize episode rewards
        discounted_r -= np.mean(discounted_r)
        discounted_r /= np.std(discounted_r)
        return discounted_r
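
A minimal usage sketch for the class above, hedged: it assumes a classic Gym-style environment (CartPole-v1, with reset() returning only the observation and step() returning a 4-tuple) and placeholder hyperparameters; none of this comes from the original snippet.

# Hedged sketch: Gym-style training loop around PolicyGradient (assumed setup).
import gym

env = gym.make('CartPole-v1')
agent = PolicyGradient(s_dim=env.observation_space.shape[0],
                       a_num=env.action_space.n,
                       device='cpu',
                       hidden=64,
                       lr=1e-3,
                       gamma=0.99)

for episode in range(500):
    s = env.reset()          # classic gym API: returns the observation only
    done = False
    while not done:
        a = agent.get_action(s)
        s_next, r, done, _ = env.step(a)
        agent.store_transition(s, a, r)
        s = s_next
    agent.learn()            # one REINFORCE update per finished trajectory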
Example #11
            filename = root + '/' + parameters[0][4] + '_' + parameters[0][3] + '_' + parameters[0][5] + '_Predictor'

        # Put the train data into the right shape
        trainX, trainY = func_utils.reshape_function_data(train_set)

        # Put the validation data into the right shape
        valX, valY = func_utils.reshape_function_data(val_set)

        train_data = [trainX, trainY]
        val_data = [valX, valY]

        # Model settings
        in_dim = trainX.shape[1:]
        out_dim = 1
        to_train_net = Net.Mlp(activation=activation, loss=loss, dropout=dropout,
                               drop_percentage=drop_percentage, input_shape=trainX[0].shape,
                               output_shape=out_dim, data_type="Function", framework="keras")

    elif data_type == 'Vectors_dataset':
        print('Training with vectors')
        loss = conf['vect_loss']
        # Load data
        channels = False
        batch_data = False
        _, train_set = vect_utils.read_vector_data(data_dir + 'train/samples')
        _, val_set = vect_utils.read_vector_data(data_dir + 'val/samples')
        filename = root

        # Put the train data into the right shape
        trainX, trainY = vect_utils.reshape_vector_data(train_set)
Example #12
class qNet2048(object):
    EPOCH = 2000

    def __init__(self):
        self.net = Net(20, 50, 1)
        self.net.setEpoch(1)
        self.gamma = 0.8
        self.main()

    def main(self):
        self.train()
        self.playGame()

    def playGame(self):
        self.initNewGame()
        i = 0
        while self.gameRunning:
            print(' Move:', i)
            i += 1
            self.game.Print()
            (action, bestValue) = self.getMaxQ()
            self.game.Move(action)
        i += 1
        print(' Total moves:', i)
        self.game.Print()

    def train(self):
        for i in range(self.EPOCH):
            print('Game Epoch:', i + 1, '/', self.EPOCH, end='\r')
            self.initNewGame()
            while self.gameRunning:
                state = self.gridToVector()
                action = random.choice(list(DIRECTION))
                self.game.Move(action)
                (action, bestValue) = self.getMaxQ()
                inValue = state + self.directionToVector(action)
                newQ = self.game.GetLastMoveScore() + self.gamma * bestValue
                self.net.Train([inValue], [[newQ]])
            print('\nScore: ', self.game.GetTotalScore())
        print()

    def getMaxQ(self):
        directions = self.simDirections()
        best = max(directions, key=directions.get)
        return (best, directions[best][0])

    def simDirections(self):
        gridVector = self.gridToVector()
        result = {}
        for direction in DIRECTION:
            inputArray = gridVector[:] + self.directionToVector(direction)
            result[direction] = self.net.Sim(inputArray)
        return result

    def directionToVector(self, direction):
        if direction == DIRECTION.LEFT:
            return [1.0, 0.0, 0.0, 0.0]
        if direction == DIRECTION.RIGHT:
            return [0.0, 1.0, 0.0, 0.0]
        if direction == DIRECTION.UP:
            return [0.0, 0.0, 1.0, 0.0]
        if direction == DIRECTION.DOWN:
            return [0.0, 0.0, 0.0, 1.0]

    def gridToVector(self):
        tab = self.game.GetGrid()
        i = []
        for row in tab:
            i += row
        maxValue = max(i)
        return [x / maxValue for x in i]

    def initNewGame(self):
        self.game = Game()
        self.game.onGameOver(self.handleGameOver)
        self.gameRunning = True

    def handleGameOver(self):
        self.gameRunning = False
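
For reference, the target value computed in train() above is the standard one-step Q-learning estimate: the score of the last move plus the discounted best predicted value of the resulting state. A small worked example with the snippet's gamma = 0.8 (the numbers are made up):

# Illustrative numbers only, mirroring the newQ computation in train().
reward = 4.0         # self.game.GetLastMoveScore()
best_next_q = 10.0   # highest Sim() output over the four directions (getMaxQ)
gamma = 0.8
new_q = reward + gamma * best_next_q   # 4.0 + 0.8 * 10.0 = 12.0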
Example #13
 def __init__(self):
     self.net = Net(20, 50, 1)
     self.net.setEpoch(1)
     self.gamma = 0.8
     self.main()
Example #14
if __name__ == '__main__':
    # train acc
    print('processing training data...')
    train_set = face_crop(ROOT_DIR + train_scv)
    train_data = dataset(train_set)
    # test acc
    print('processing test data...')
    test_set = face_crop(ROOT_DIR + test_scv)
    test_data = dataset(test_set)

    train_loader = DataLoader(dataset=train_data, num_workers=4)
    test_loader = DataLoader(dataset=test_data, num_workers=4)

    ##### build a validation model #####
    device = torch.device('cuda')
    val_net = Net(ks=3, stride=1).to(device)
    print(val_net)

    train_acc = val(train_loader, val_net, pth)
    test_acc = val(test_loader, val_net, pth)
    print('Train acc', train_acc)
    print('Test acc', test_acc)

    val_net.load_state_dict(torch.load(pth, map_location=device))
    val_net.eval()

    visul_data = pd.read_csv(ROOT_DIR + test_scv)
    for v in visul:
        img = cv2.imread(ROOT_DIR + '/images/' + v)
        start = test_set['name'].index(v)
        step = test_set['name'].count(v)
Example #15
import glob  # used to list the test images
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as I
import csv
import cv2
import numpy as np
from Network import Net
# from VGG16 import Net
import torch.optim as optim
# from Utils import *
torch.cuda.is_available()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = Net().to(device)
# path = "../CheckPoint/model"
# net.load_state_dict(torch.load(path))
# net.eval()
# root_dir='dogs-vs-cats/train/'
TestDir = "dogs-vs-cats/test1"
imageNames = glob.glob(TestDir + "/*")
for name in imageNames:
    img = cv2.imread(name)
    image = cv2.resize(img, (224, 224))
    image = image.transpose((2, 0, 1))
    image = np.float32(np.expand_dims(image, 0))
    ImgTensor = torch.from_numpy(image).to(device)
    logits, softmaxoutput = net(ImgTensor)
    print("\nOutput for image ", name, " is : ", torch.argmax(logits), logits)
    # print(logits)
Example #16
    # test set
    print('processing test set...')
    test_set = face_crop(ROOT_DIR + test_scv)
    test_data = dataset(test_set)

    train_loader = DataLoader(dataset=train_data,
                              batch_size=10,
                              shuffle=True,
                              num_workers=4)

    test_loader = DataLoader(dataset=test_data,
                             batch_size=10,
                             shuffle=False,
                             num_workers=4)

    mask_Net = Net(ks=3, stride=1)
    mask_Net2 = Net(ks=3, stride=2)
    mask_Net3 = Net(ks=7, stride=1)
    mask_Net4 = Net(ks=7, stride=2)

    os.makedirs('./log', exist_ok=True)
    print('1st experiment (filter size=3*3, stride size=1) starts:')
    Trainer(dataloader=(train_loader, test_loader),
            net=mask_Net,
            num_epochs=50).run()
    print('2nd experiment (filter size=3*3, stride size=2) starts:')
    Trainer(dataloader=(train_loader, test_loader),
            net=mask_Net2,
            num_epochs=50).run()
    print('3rd experiment (filter size=7*7, stride size=1) starts:')
    Trainer(dataloader=(train_loader, test_loader),
            net=mask_Net3,
            num_epochs=50).run()
Example #17
    while True:
        a = input("waiting")
        if a == "x":
            break
        i = random.randint(0, len(images) - 1)
        image = images[i]
        label = labels[i]
        brain.guess(image)
        guess = get_max(brain.output)
        plot_image(image)
        print("number =", label)
        print("guess =", guess)


brain = Net(784, [300, 100], 10, ["sigmoid", "sigmoid", "softmax"])
brain.initialise_weights()

mndata = MNIST('digits')
images, labels = mndata.load_training()

images = normalise(images)

N = 1000000
i = 0
chunks = 100
chunk = 0
block_size = N // chunks

target = [0 for i in range(10)]