Example #1
# Imports assumed by this excerpt; ConvNet, arguments, train and valid are project code.
import torch
import torch.optim as optim
from torchvision import datasets, transforms


def main():

    args = arguments()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data', train=True, download=True, transform=transform),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    valid_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data', train=False, transform=transform),
                                               batch_size=args.test_batch_size,
                                               shuffle=True,
                                               **kwargs)

    model = ConvNet().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        valid(args, model, device, valid_loader)
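# The `arguments()` helper used above is not shown on this page. A minimal,
# hypothetical sketch consistent with the flags the example reads
# (args.batch_size, args.test_batch_size, args.epochs, args.lr, args.momentum,
# args.no_cuda, args.seed):
import argparse

def arguments():
    parser = argparse.ArgumentParser(description='MNIST ConvNet example')
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--test-batch-size', type=int, default=1000)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--momentum', type=float, default=0.5)
    parser.add_argument('--no-cuda', action='store_true', default=False)
    parser.add_argument('--seed', type=int, default=1)
    return parser.parse_args()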
Example #2
# Imports assumed by this excerpt; ConvNet and test_train_split are project code.
import numpy as np
import keras
from os.path import join


def train(data,
          labels,
          output,
          test_size=0.33,
          batch_size=32,
          epochs=10000,
          patience=100):
    # load data
    x = np.load(data)
    y = np.load(labels)

    # reshape
    n, m, k = x.shape
    x = x.reshape((n, m, k, 1))

    # test/train split
    mask = test_train_split(x, test_size=test_size)
    x_train = x[mask]
    x_validation = x[~mask]

    y_train = y[mask]
    y_validation = y[~mask]

    # create model
    model = ConvNet(dim=k,
                    kernels=[7, 3, 3],
                    filters=[64, 192, 480],
                    maxpool=[3, 3, 3],
                    stride=[2, 2, 2],
                    dropout=0.2,
                    nlabels=y.shape[-1])
    model.create()

    # early stop
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=patience,
                                               mode='min')

    # checkpoint
    checkpoint = keras.callbacks.ModelCheckpoint(join(output, 'convnet.h5'),
                                                 monitor='val_loss',
                                                 save_best_only=True,
                                                 mode='min')

    # print
    model.network.summary()  # summary() prints itself and returns None

    # train
    model.network.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(x_validation, y_validation),
                      callbacks=[early_stop, checkpoint],
                      shuffle=True,
                      verbose=2)
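# `test_train_split` above is project code and not shown here. Judging from its
# usage (a boolean mask indexed as x[mask] / x[~mask]), a minimal hypothetical
# sketch:
import numpy as np

def test_train_split(x, test_size=0.33):
    # True marks a training sample; roughly (1 - test_size) of the data
    return np.random.rand(len(x)) >= test_size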
Example #3
def go():
    import time
    #########################  EPOCH BATCH SIZE
    EPOCH = 1
    BATCH_SIZE = 256

    #########################  Read Data
    # trainset, valset, testset = load_mnist()
    train_images, train_labels = load_mnist(kind='train')
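    # 't10k' is the conventional file-name prefix of the 10,000-image MNIST test split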
    test_images, test_labels = load_mnist(kind='t10k')
    # #########################  Network
    # net = FCNet()
    # #
    # # #########################  Compile
    # net.compile(optimizer='sgd', loss='cross_entropy')
    # #
    # # #########################  Train FC
    # st = time.time()
    # net.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCH, verbose=1)
    # et = time.time()
    # print('time cost: ', et - st)
    # print('Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
    #
    # #########################  Evaluate FC
    # train_acc, train_loss = net.evaluate(train_images, train_labels, batch_size=500)
    #
    # test_acc, test_loss = net.evaluate(test_images, test_labels, batch_size=500)
    # print('#' * 10, 'Fully Connected Network')
    # print(f'Train acc -> {train_acc}, Train loss -> {train_loss}')
    # print(f'Test acc -> {test_acc}, Test loss -> {test_loss}')

    print('Start Training ConvNet---')
    #########################  Network
    net = ConvNet()

    #########################  Compile
    net.compile(optimizer='sgd', loss='cross_entropy', lr=0.01)

    #########################  Train Conv
    st = time.time()
    net.fit(train_images,
            train_labels,
            batch_size=BATCH_SIZE,
            epochs=EPOCH,
            verbose=1)
    et = time.time()
    print('time cost: ', et - st)
    print('Current process memory usage: %.4f GB' %
          (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
    #########################  Evaluate Conv
    train_acc, train_loss = net.evaluate(train_images,
                                         train_labels,
                                         batch_size=500)

    test_acc, test_loss = net.evaluate(test_images,
                                       test_labels,
                                       batch_size=500)
    print('#' * 10, 'Convolutional Network')
    print(f'Train acc -> {train_acc}, Train loss -> {train_loss}')
    print(f'Test acc -> {test_acc}, Test loss -> {test_loss}')
Example #4
    def __init__(self, state_dim, action_dim, n_latent_var):
        super(ActorCritic, self).__init__()

        # actor
        # self.action_layer = nn.Sequential(
        #         nn.Linear(state_dim, n_latent_var),
        #         nn.Tanh(),
        #         nn.Linear(n_latent_var, n_latent_var),
        #         nn.Tanh(),
        #         nn.Linear(n_latent_var, action_dim),
        #         nn.Softmax(dim=-1)
        #         )
        # self.action_layer = nn.Sequential(
        #     nn.Conv2d(3, 16, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(16),
        #     nn.ReLU(),
        #     nn.Conv2d(16, 32, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(32),
        #     nn.ReLU(),
        #     nn.Conv2d(32, 32, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(32),
        #     nn.ReLU(),
        #     nn.Flatten(),
        #     nn.Linear(800,action_dim),
        #     nn.Softmax(dim=-1)
        # )
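        # ConvNet(64, 64, action_dim, True): the trailing flag presumably
        # enables the Softmax head that the commented-out Sequential applied
        # (assumption; ConvNet's signature is not shown on this page).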
        self.action_layer = ConvNet(64, 64, action_dim, True)
        # critic
        # self.value_layer = nn.Sequential(
        #         nn.Linear(state_dim, n_latent_var),
        #         nn.Tanh(),
        #         nn.Linear(n_latent_var, n_latent_var),
        #         nn.Tanh(),
        #         nn.Linear(n_latent_var, 1)
        #         )
        # self.value_layer = nn.Sequential(
        #     nn.Conv2d(3, 16, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(16),
        #     nn.ReLU(),
        #     nn.Conv2d(16, 32, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(32),
        #     nn.ReLU(),
        #     nn.Conv2d(32, 32, kernel_size=5, stride=2),
        #     nn.BatchNorm2d(32),
        #     nn.ReLU(),
        #     nn.Flatten(),
        #     nn.Linear(800, 1)
        # )
        self.value_layer = ConvNet(64, 64, 1)
Example #5
def test():
    data = DataSet('test')
    net = ConvNet(net_name, BATCH_SIZE_TEST)
    n_correct = 0

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("load model -- succeed")
        else:
            print("load model -- fail")
        while data.cur_epoch == 0:
            batch_data, batch_label = data.next_batch()
            feed_dict = {
                net.ph_inputs: batch_data,
                net.ph_labels: batch_label,
                net.ph_mean: data.mean_image,
                net.ph_training: False
            }
            _pred, _loss = sess.run([net.pred, net.loss], feed_dict=feed_dict)
            _pred = _pred.astype(np.uint8)
            correct = np.zeros(data.batch_size, dtype=np.uint8)
            correct[_pred == batch_label] = 1
            n_correct += sum(correct)

        print('test: %.4f' % (n_correct /
                              (data.batch_size * data.batch_per_epoch)))
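# Note: this example (and the next) uses TensorFlow 1.x APIs (tf.Session,
# tf.train.Saver). Under TensorFlow 2.x it would need
# `import tensorflow.compat.v1 as tf` and `tf.disable_v2_behavior()` to run
# unchanged.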
Example #6
def train():
    lr = LR
    data = DataSet('train')
    valid_data = DataSet('valid')
    net = ConvNet(net_name, BATCH_SIZE_TRAIN)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(tb_dir, sess.graph)

        while data.cur_iteration < MAX_ITERATION:
            if data.cur_iteration in (32000, 48000):
                lr /= 10
            batch_data, batch_label = data.next_batch()

            feed_dict = {
                net.ph_inputs: batch_data,
                net.ph_labels: batch_label,
                net.ph_mean: data.mean_image,
                net.ph_lr: lr,
                net.ph_training: True
            }

            _, _loss, _summary = sess.run(
                [net.optimizer, net.loss, net.summary], feed_dict=feed_dict)
            if data.cur_iteration % 100 == 0:
                print("ite:%5d  ep:%4d  loss:%4.4f" %
                      (data.cur_iteration, data.cur_epoch, _loss))
                summary_writer.add_summary(_summary, data.cur_iteration)
            if data.cur_iteration % 5000 == 0:
                ckpt_fname = 'train_model.ckpt'
                ckpt_full_fname = os.path.join(model_dir, ckpt_fname)
                saver.save(sess, ckpt_full_fname, data.cur_epoch)

                n_correct = 0
                while valid_data.cur_epoch == 0:
                    batch_data, batch_label = valid_data.next_batch()
                    feed_dict = {
                        net.ph_inputs: batch_data,
                        net.ph_labels: batch_label,
                        net.ph_mean: valid_data.mean_image,
                        net.ph_training: False
                    }
                    _pred, _loss = sess.run([net.pred, net.loss],
                                            feed_dict=feed_dict)
                    _pred = _pred.astype(np.uint8)
                    correct = np.zeros(valid_data.batch_size, dtype=np.uint8)
                    correct[_pred == batch_label] = 1
                    n_correct += sum(correct)
                pctg = n_correct / (valid_data.batch_per_epoch *
                                    valid_data.batch_size)
                _sum = sess.run(net.summary_valid,
                                feed_dict={net.ph_pctg: pctg})
                summary_writer.add_summary(_sum, data.cur_iteration)
                valid_data.reset()
                print('validation: %.4f' % pctg)
Example #7
0
def copynet(net):
    newnet = ConvNet(channels=64, blocks=3)
    newnet.load_state_dict(net.state_dict())
    return newnet
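# Note: copynet assumes source and clone share the exact architecture
# (channels=64, blocks=3 here); load_state_dict raises RuntimeError on any
# shape mismatch. Hypothetical sanity check:
#
#     clone = copynet(net)
#     assert all(torch.equal(a, b) for a, b in
#                zip(net.state_dict().values(), clone.state_dict().values()))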
Example #8
        return np.concatenate([top10, remain])

    def mutate(self, network):
        network = copynet(network)
        for param in network.parameters():
            param.data += (torch.rand_like(param) <
                           self.mutation_rate) * torch.randn_like(param)
        return network

    def recombine(self, network1, network2):
        network1 = copynet(network1)
        network2 = copynet(network2)
        for p1, p2 in zip(network1.parameters(), network2.parameters()):
            if random.getrandbits(1):
                p1.data = p2.data.clone()  # clone so the two networks never share storage
        return network1


if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    nets = [ConvNet(64, 3) for _ in range(50)]
    for n in nets:
        n.to(device)
    pop = NetworkPopulation(nets)
    eval_array = []
    for _ in range(3):
        pop.eval()
        print(pop.evals)
        eval_array.append(pop.evals)
        pop = NetworkPopulation(pop.select())
Example #9
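                # one-hot encode the board: tile value k -> channel k (16 channels)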
                b = [b == i for i in range(16)]
                b = torch.stack(b, dim=1).float()
                preds = model(b.view(-1, 16, 4, 4))
                preds = torch.argsort(preds.cpu(), dim=1, descending=True)
                dead_s = games.move_batch(preds)
                if dead_s:
                    result.append(count)
                    break
                count += 1
    return np.mean(result)


if __name__ == '__main__':
    from time import time
    from network import ConvNet
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'Using {device}')

    names = [
    ]
    for name in names:
        print(name)
        m = ConvNet(**{'channels': 64, 'blocks': 3})
        m.load_state_dict(torch.load('models/{}.pt'.format(name), map_location=device))
        m.to(device)
        t = time()
        print(eval_nn(m, name, number=5000, device=device, verbose=True))
        t = time() - t
        print('{0:.3f} seconds'.format(t))
        print('-'*10)
Example #10
def run_experiments():

    ###################################EXPERIMENT_1##############################################################
    '''
    DESCRIPTION
    Training and testing sets both contain all the recordings.
    80-20 split, random_state = 42.
    '''
    '''
    ID: 5924295
    '''

    list_IDs = []
    train_list_IDs = []
    test_list_IDs = []
    y = []
    IDs = [1, 2, 3, 4]
    list_IDs, y = separate_data_by_mic_id_train(IDs)
    train_list_IDs, test_list_IDs, y_train, y_test = train_test_split(
        list_IDs, y, test_size=0.2, random_state=42)  # 100

    ######HYPERPARAMETERS#############################################
    num_epochs = 10
    num_classes = 2
    learning_rate = 1e-3
    batch_size = 1
    #################################################################

    training_set = TrainDataset(train_list_IDs, y_train)
    train_loader = torch.utils.data.DataLoader(dataset=training_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    test_set = TestDataset(test_list_IDs, y_test)  # test_list_Ids
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=batch_size,
                                              shuffle=True)
    if use_cuda:
        model = ConvNet(num_classes).cuda()
    else:
        model = ConvNet(num_classes)

    # Loss and optimizer

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training
    train_mode = True
    print('starting training')
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)

    PATH = '/home/shasvatmukes/project/audio_classification/weights/simple_CNN_weights_log1.pth'  # unique names
    torch.save(model.state_dict(), PATH)

    model.load_state_dict(torch.load(PATH))
    # Test
    train_mode = False
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)
Example #11
### Split data into test and train ###
NTRAIN = int(NCLASSES * NFILES * 0.8)
NTEST = NCLASSES * NFILES - NTRAIN
# Create random indices
indicesTrain = list(np.random.choice(NCLASSES * NFILES, NTRAIN, replace=False))
indicesTrainSet = set(indicesTrain)  # set gives O(1) membership tests
indicesTest = [i for i in range(NCLASSES * NFILES) if i not in indicesTrainSet]
# Train sets, move to device
dataTrain   = torch.FloatTensor([data[i]     for i in indicesTrain]).to(device)
labelsTrain = torch.FloatTensor([iClasses[i] for i in indicesTrain]).long().to(device)
# Test sets, keep on cpu
dataTest   = torch.FloatTensor([data[i]     for i in indicesTest])
labelsTest = torch.FloatTensor([iClasses[i] for i in indicesTest]).long()
# .long() fixes: "RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target'"

### Create model ###
model = ConvNet(NCLASSES, imageSize=IMAGE_SIZE, nConvLayers=NLAYERS, nchannels=NCHANNELS).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
batchSize = math.ceil(NTRAIN / NBATCHES)

### Create directory to store model weights ###
nowStr = datetime.now().strftime("%y%m%d-%H%M%S")
modelDir = "model_%s_%d_%d_%d_%d_%d_%d" % (nowStr, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE)
os.mkdir(modelDir)
copyfile("./network.py", modelDir + "/network.py")
f = open(modelDir + "/classes.txt", "w")
f.write(" ".join(classes))
f.close()

f = open(modelDir + "/indices.txt", "w")
f.write("train " + " ".join(map(str, indicesTrain)) + "\n")
Example #12
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--step_size', type=int, default=5)
    parser.add_argument('--gamma', type=float, default=0.5)    
    args = parser.parse_args()
    
    trainset = MyDataset('train')
    train_loader = DataLoader(dataset=trainset, num_workers=4, batch_size=args.batch_size, shuffle=True, drop_last=True, pin_memory=True)

    valset = MyDataset('val')
    val_loader = DataLoader(dataset=valset, num_workers=4, batch_size=args.batch_size, pin_memory=True)

    testset = MyDataset('test')
    test_loader = DataLoader(dataset=testset, num_workers=4, batch_size=args.batch_size, pin_memory=True)    
    
    model = ConvNet()   
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)   
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)        
    model = model.cuda()

    best_acc = 0.0
    for epoch in range(args.max_epoch):
        model.train()
        for i, batch in enumerate(train_loader):
            imgs, labels = batch[0].cuda(), batch[1].cuda()
            optimizer.zero_grad()
            logits = model(imgs)
            loss = F.cross_entropy(logits, labels)
            loss.backward()
            optimizer.step()
        # step the scheduler once per epoch, after the optimizer updates;
        # calling it at the top of the loop would skip the initial learning rate
        lr_scheduler.step()
Example #13
        raise ValueError('Seed is {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    name = str(seed).zfill(5)

    start_time = time()
    use_network = False
    if not use_network:
        selfplay_fixed(name, number=10, times=4, verbose=args.verbose)
    else:
        torch.backends.cudnn.benchmark = True
        m_name = 'models/20200207/best_s7_jit.pth'
        if not os.path.exists(m_name):
            full_name = '20200207/0_1000_soft3.0c64b3_p10_bs2048lr0.1d0.0_s7_best'
            print('Tracing: {}'.format(full_name))
            model = ConvNet(channels=64, blocks=3)
            model.load_state_dict(torch.load('models/{}.pt'.format(full_name)))
            model.to('cuda')
            model.eval()
            model = torch.jit.trace(model, torch.randn(10, 16, 4, 4, dtype=torch.float32, device='cuda'))
            torch.jit.save(model, m_name)
            print('Jit model saved')
        else:
            print(m_name)
            model = torch.jit.load(m_name)
        # run selfplay with the freshly traced or loaded model
        selfplay(name, model, number=10, times=4, verbose=args.verbose)

    total_time = time() - start_time
    print(f'Took {total_time/60:.1f} minutes')
Example #14
transformOpt = transforms.Compose([
    transforms.ToTensor(),
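    # noise augmentation: darken a Bernoulli-sampled subset of pixels by a random amount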
    transforms.Lambda(lambda x: x + bernoulli.sample(x[0].shape).reshape(x.shape[1], x.shape[2]).unsqueeze(0) * -torch.rand(x.shape))
])

# The dataset
train_dataset = NumbersDataSet('../new_data/', transform=transformOpt)


# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)


# Initialize our network
model = ConvNet(num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)

plot_training = []

for epoch in range(num_epochs):
    loss_training = 0.0
    epoch_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # forward pass (completion of the truncated loop; standard pattern)
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
    # record the average loss per epoch (plot_training is declared above)
    plot_training.append(epoch_loss / total_step)
Example #15
import numpy as np
import torch
import cv2
import imutils
from network import ConvNet

# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Initialize the network
model = ConvNet().to(device)

# Loading the weights
model.load_state_dict(torch.load('../model_final.ckpt'))

# Eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
model.eval()

image = cv2.imread("../test.png")

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)

thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

# find the contours (continuous blobs of pixels) in the image
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Hack for compatibility with different OpenCV versions
contours = contours[1] if imutils.is_cv3() else contours[0]
Example #16
def main(args):
    if args.name:
        args.name += '_'
    logname = f'{args.name}{args.t_tuple[0]}_{args.t_tuple[1]}_soft{args.soft}' \
              f'c{args.channels}b{args.blocks}_p{args.patience}_' \
              f'bs{args.batch_size}lr{args.lr}d{args.decay}_s{args.seed}'
    print(logname)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using {}'.format(device))
    torch.backends.cudnn.benchmark = True

    train_set = OneHotConvGameDataset(args.path, args.t_tuple[0], args.t_tuple[1], device, soft=args.soft)
    train_dat = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)

    m = ConvNet(channels=args.channels, blocks=args.blocks)
    if args.pretrained:
        m.load_state_dict(torch.load('models/{}.pt'.format(args.pretrained), map_location=device))
        print('Loaded ' + args.pretrained)
        logname = 'pre_'+logname
    m.to(device)
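    # KLDivLoss with reduction='batchmean' expects log-probabilities as input;
    # this assumes ConvNet's forward ends in log_softmax and the dataset
    # provides (soft) probability targets.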
    loss_fn = nn.KLDivLoss(reduction='batchmean')
    optimizer = torch.optim.Adam(m.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.decay)
    t_loss = []
    min_move = []
    best = 0.0
    timer = 0
    if args.patience == 0:
        stop = args.epochs
    else:
        stop = args.patience

    data_len = len(train_dat)
    for epoch in range(args.epochs):
        print('-' * 10)
        print('Epoch: {}'.format(epoch))
        timer += 1

        m.train()
        running_loss = 0
        for x, y in tqdm(train_dat):
            optimizer.zero_grad()
            pred = m(x)
            loss = loss_fn(pred, y)
            running_loss += loss.data.item()
            loss.backward()
            optimizer.step()
        running_loss /= data_len
        if epoch == 2 and running_loss > 210/1000:
            stop = 0
        print('Train mLoss: {:.3f}'.format(1e3 * running_loss))
        t_loss.append(running_loss)
        
        m.eval()
        time1 = time()
        ave_min_move = eval_nn_min(m, number=10, repeats=40, device=device)
        time_str = ', took {:.0f} seconds'.format(time()-time1)
        min_move.append(ave_min_move)
        if ave_min_move >= best:
            tqdm.write(str(ave_min_move) + ' ** Best' + time_str)
            best = ave_min_move
            timer = 0
            torch.save(m.state_dict(), 'models/' + logname + '_best.pt')
        else:
            tqdm.write(str(ave_min_move) + time_str)

        if timer >= stop:
            print('Ran out of patience')
            print(f'Best score: {best}')
            # torch.save(m.state_dict(), 'models/'+logname+f'_e{epoch}.pt')
            break
        else:
            print(f'{stop - timer} epochs remaining')

    np.savez('logs/'+logname,
             t_loss=t_loss,
             min_move=min_move,
             params=args)
Example #17
def main():
    parser = argparse.ArgumentParser(description='PyTorch FGSM')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=640,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--epsilon',
                        type=float,
                        default=0.25,
                        help='epsilon(perturbation) of adversarial attack')
    parser.add_argument('--dataset-normalize',
                        action='store_true',
                        default=False,
                        help='input whether normalize or not (default: False)')
    parser.add_argument('--network',
                        type=str,
                        default='fc',
                        help="network type (choices: fc, conv, drop, googlenet; default: 'fc')")
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--dataset',
                        type=str,
                        default='mnist',
                        help='choose dataset : mnist or cifar')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    transformation = transforms.ToTensor()
    # Dataset normalize
    if args.dataset_normalize:
        transformation = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])

    # dataset

    if args.dataset == 'cifar':  # cifar10
        train_dataset = datasets.CIFAR10('../data',
                                         train=True,
                                         download=True,
                                         transform=transformation)
        test_dataset = datasets.CIFAR10('../data',
                                        train=False,
                                        download=True,
                                        transform=transformation)
    else:  # mnist(default)
        train_dataset = datasets.MNIST('../data',
                                       train=True,
                                       download=True,
                                       transform=transformation)
        test_dataset = datasets.MNIST('../data',
                                      train=False,
                                      download=True,
                                      transform=transformation)

    # MNIST dataset
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=True)

    # Network Type
    if args.network == 'conv':
        model = ConvNet().to(device)
    elif args.network == 'drop':
        model = DropNet().to(device)
    elif args.network == 'googlenet' or args.dataset == 'cifar':
        model = GoogleNet().to(device)
    else:  # 'fc' (default); also covers unrecognized values
        model = FcNet().to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train(args, model, device, train_loader, optimizer)

    test(args, model, device, test_loader)
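# For reference, the FGSM perturbation that the `test` call above presumably
# applies (hypothetical sketch; the attack code itself is not shown on this
# page, and torch is assumed imported):
def fgsm(x, grad, epsilon):
    # move the input a step of size epsilon in the direction of the loss
    # gradient's sign, then clamp back to the valid pixel range
    x_adv = x + epsilon * grad.sign()
    return torch.clamp(x_adv, 0, 1)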
Example #18
                                       train=False,
                                       download=True,
                                       transform=test_transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Verify if CUDA is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Define net
net = ConvNet(widen_factor=32)
if device == 'cuda':
    net = net.cuda()
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.SGD(net.parameters(), lr=0.025, momentum=0.9)


# Training
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
Example #19
from network import ConvNet
import numpy as np
from dataset import getDataset

dataset = getDataset()

print(dataset["balance_train"])

conv = ConvNet(dataset)
conv.fit(dataset["one_hot_train"],
         dataset["one_hot_label_train"],
         dataset["one_hot_validation"],
         dataset["one_hot_label_validation"],
         graphics=True)

print("Final accuracy:")
print(" " + str(
    conv.computeAccuracy(dataset["one_hot_validation"],
                         dataset["labels_validation"])))
print("Friends:")
conv.classify(dataset["one_hot_friends"])