Example #1
def info_tutorial():
    nn = NeuralNet()
    x_shape = dat.shape()
    test_gen, _ = dat.testset(batch_size=cfg.BATCH_SIZE,
                              max_samples=cfg.TEST_SET_SIZE)
    nn.test(test_gen, print_it=True)
    nn.net.initialize_spatial_layers(x_shape, cfg.BATCH_SIZE, PATCH_SIZE)
    nn.summary(x_shape, print_it=True)
    nn.print_weights()
    print(nn.output_size(x_shape))

    # Spatial operations, defined on the net itself. Remember that after enabling a layer, ops are affected
    assert nn.net.num_spatial_layers() != 0
    nn.net.print_spatial_status()
    # nn.train(epochs=1, set_size=5000, lr=0.1, batch_size=cfg.BATCH_SIZE)  # Train to see fully disabled performance
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)  # Count convolution operations instead of MAC
    print(nn.net.num_ops())  # (ops_saved, total_ops)

    # Given x, we generate all spatial layer requirement sizes:
    spat_sizes = nn.net.generate_spatial_sizes(x_shape)
    print(spat_sizes)
    p_spat_sizes = nn.net.generate_padded_spatial_sizes(x_shape, PATCH_SIZE)
    print(p_spat_sizes)

    # Generate a constant 1 value mask over all spatial nets
    print(nn.net.enabled_layers())
    nn.net.fill_masks_to_val(1)
    print(nn.net.enabled_layers())
    print(nn.net.disabled_layers())
    nn.net.print_spatial_status()  # Now all are enabled, seeing the mask was set
    nn.train(epochs=1, set_size=5000, lr=0.1, batch_size=cfg.BATCH_SIZE)  # Train to see all layers enabled performance
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)  # Count convolution operations instead of MAC
    nn.net.reset_spatial()  # Disables layers as well
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)
    # Turns on 3 ids and turns off all others
    chosen_victims = random.sample(range(nn.net.num_spatial_layers()), 4)
    nn.net.strict_mask_update(
        update_ids=chosen_victims[0:3],
        masks=[torch.zeros(p_spat_sizes[v]) for v in chosen_victims[0:3]])

    # Turns on one additional id and *does not* turn off all others
    nn.net.lazy_mask_update(
        update_ids=[chosen_victims[3]],
        masks=[torch.zeros(p_spat_sizes[chosen_victims[3]])])
    nn.net.print_spatial_status()
    print(nn.net.enabled_layers())
    nn.train(epochs=1, set_size=5000, lr=0.1,
             batch_size=cfg.BATCH_SIZE)  # Run with 4 layers on
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)
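
For reference, a short helper that turns the (ops_saved, total_ops) tuple documented above into a savings percentage. This is a sketch that assumes only the tuple ordering noted in the num_ops() comment, and could be called at any point in the tutorial:

def ops_savings_pct(net):
    # num_ops() returns (ops_saved, total_ops) per the comment above
    ops_saved, total_ops = net.num_ops()
    return 100.0 * ops_saved / max(total_ops, 1)

print(f'{ops_savings_pct(nn.net):.1f}% of operations saved')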
Example #2
def training():
    # dat.data_summary(show_sample=False)
    nn = NeuralNet(resume=True)  # Spatial layers are disabled by default
    nn.summary(dat.shape())
    nn.train(epochs=50, lr=0.01)
    test_gen, _ = dat.testset(batch_size=cfg.BATCH_SIZE,
                              max_samples=cfg.TEST_SET_SIZE)
    test_loss, test_acc, count = nn.test(test_gen)
    print(
        f'==> Final testing results: test acc: {test_acc:.3f} with {count}, test loss: {test_loss:.3f}'
    )
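
A hedged sanity check for the "disabled by default" behavior noted above, assuming the layer-listing API shown in Example #1 returns lists of layer ids:

assert len(nn.net.enabled_layers()) == 0  # no spatial layers active yet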
Example #3
import pickle as pkl

import numpy
import torch
from torch.autograd import Variable


def main(loadfrom, saveto, dev_data):

    loadfrom = loadfrom.strip().split(',')

    net = []
    for ll in loadfrom:
        with open(ll, 'rb') as f:
            net.append(pkl.load(f))

    # data_iterator is assumed to be a project-level helper yielding batches
    test = data_iterator(dev_data, net[0].options)

    print('Testing...', end='')
    preds = []
    n_samples = 0
    softmax = torch.nn.Softmax(dim=-1)
    for s1, s1m, labels in test:
        for nn in net:
            # note: train() keeps dropout active at prediction time; use
            # nn.eval() for deterministic inference
            nn.train()
        s1_ = torch.from_numpy(numpy.array(s1))
        s1m_ = torch.from_numpy(numpy.array(s1m).astype('float32'))

        for ii, nn in enumerate(net):
            out = nn(Variable(s1_, requires_grad=False),
                     Variable(s1m_, requires_grad=False))
            out = softmax(out)
            out = out.data.numpy()
            if ii == 0:
                pp = out
            else:
                pp += out
        pp = pp / len(net)

        preds.append(pp.argmax(-1))
        n_samples += len(labels)

    preds = numpy.concatenate(preds, axis=0)
    preds = (2. * preds) - 1.

    pos = numpy.sum(preds == 1.)
    neg = numpy.sum(preds == -1.)
    print('pos {} neg {}'.format(pos, neg))

    with open(saveto, 'w') as f_out:
        with open(dev_data, 'r') as f_in:
            print(f_in.readline().strip(), file=f_out)
            for ii, l in enumerate(f_in):
                print('{}\t{}'.format(l.strip(), int(preds[ii])), file=f_out)
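
The running-sum average over the ensemble can be written more compactly by stacking the per-model softmax outputs; a minimal equivalent sketch using only names from the loop above:

outs = [softmax(nn(Variable(s1_, requires_grad=False),
                   Variable(s1m_, requires_grad=False))).data.numpy()
        for nn in net]
pp = numpy.mean(numpy.stack(outs, axis=0), axis=0)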
Example #4
def grad_train_loop(hypers, nn, criterion=torch.nn.MSELoss(reduction='mean')):
    for epoch in range(hypers['epochs']):
        nn.train()
        train_losses, test_losses = [], []
        for batch in range(hypers['batch_train']):
            graphs, labels = grad_generate_batch(hypers['H'], hypers['n'])
            loss = grad_eval_batch(nn, graphs, labels, criterion)
            loss.backward()
            train_losses.append(loss.item())
            nn.optim.step()
            nn.zero_grad()

        nn.eval()
        for batch in range(hypers['batch_test']):
            graphs, labels = grad_generate_batch(hypers['H'], hypers['n'])
            loss = grad_eval_batch(nn, graphs, labels, criterion)
            test_losses.append(loss.item())
        print(
            f"Train loss is {sum(train_losses) / len(train_losses):.4E}.\nTest loss is {sum(test_losses)/len(test_losses):.4E}.\n"
        )
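
grad_generate_batch and grad_eval_batch are defined elsewhere; a minimal sketch of the interface the loop assumes (shapes, the graph representation, and the loss contract are guesses, not the original implementation). The loop also implicitly requires the model to expose its optimizer as nn.optim:

def grad_generate_batch(H, n):
    # Hypothetical: n random feature vectors standing in for graphs, plus labels
    graphs = torch.randn(n, H)
    labels = torch.randn(n, 1)
    return graphs, labels

def grad_eval_batch(nn, graphs, labels, criterion):
    # Hypothetical: forward pass followed by the criterion, returning a scalar loss
    return criterion(nn(graphs), labels)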
Example #5
            # Pair each prediction with its target for later analysis
            self.predictions = list(zip(predicted, targets))

    def predict(self, x):
        with torch.no_grad():
            output = self.model_gn(x.to(device))
            # Return the predicted class index per sample rather than feeding
            # the argmax list back through the model
            return [o.argmax() for o in output]


#Training
nn = COVID_NN(64)
nn.train(20)

#Global accuracy
num = 0
for i in nn.predictions:
    if i[0] == i[1]:
        num += 1

print(num / len(nn.predictions))

#False Negative rate
FN = 0
for i in nn.predictions:
    if i[0] != 0 and i[1] == 0:
        FN += 1
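
The snippet stops at the raw count; one hedged way to report it as a rate (the denominator convention is a guess, since the label encoding is not shown):

print(FN / max(1, len(nn.predictions)))  # false negatives per test sample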
Example #6
parser.add_argument('--uniform', '-u', action='store_true', 
	help='disable neural-guidance and sample data points uniformly; corresponds to a DSAC model')

opt = parser.parse_args()

# setup training set
trainset = HLWDataset('hlw/split/train.txt', opt.imagesize, training=True)
trainset_loader = torch.utils.data.DataLoader(trainset, shuffle=True, num_workers=6, batch_size=opt.batchsize)

# setup ng dsac estimator
loss = Loss(opt.imagesize) 
ngdsac = NGDSAC(opt.hypotheses, opt.inlierthreshold, opt.inlierbeta, opt.inlieralpha, loss, opt.invalidloss)

# setup network
nn = Model(opt.capacity)
nn.train()
nn = nn.cuda()

# optimizer and lr schedule (schedule offset handled further below)
optimizer = optim.Adam(nn.parameters(), lr=opt.learningrate)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt.schedulestep, gamma=0.5)

# keep track of training progress
train_log = open('log_'+opt.session+'.txt', 'w', 1)

iteration = 0
epochs = int(opt.iterations / len(trainset)) # number of epochs from number of target iterations 

for epoch in range(epochs):

	print('=== Epoch: ', epoch, '========================================')
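
The loop body is truncated in this excerpt. A hedged sketch of the per-iteration bookkeeping the setup above implies; the placeholder loss stands in for the nn/ngdsac forward pass, whose call signature is not part of this excerpt, and the scheduler is stepped per iteration on the assumption that opt.schedulestep counts iterations:

	for batch in trainset_loader:

		optimizer.zero_grad()
		loss = torch.zeros(1, requires_grad=True)  # placeholder only; run nn and ngdsac on the batch here
		loss.backward()
		optimizer.step()
		scheduler.step()

		train_log.write('{} {}\n'.format(iteration, loss.item()))
		iteration += 1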
Example #7
                pbar.update(processed, **training_stat)
            pbar.finish()

    def predict(self, X):
        X_var = make_var(X)  # make_var: assumed project helper wrapping arrays as tensors
        return self.forward(X_var).data.numpy()


class batch_iterator():
    def __init__(self):
        self.batches_cnt = 100
        self.cur_batch = -1

    def __next__(self):
        # Python 3 iterator protocol; stop after batches_cnt batches
        self.cur_batch += 1
        if self.cur_batch >= self.batches_cnt:
            self.cur_batch = -1
            raise StopIteration
        bsize = np.random.randint(16, 32)
        return self.cur_batch, np.random.randn(bsize, 1000)

    def __iter__(self):
        return self


if __name__ == '__main__':
    nn = DERN(1000)
    nn.train(batch_iterator(), epochs=10)
    X_test = np.random.rand(3, 1000)
    print(nn.predict(X_test))
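
Assuming numpy is imported as np (as the class body requires), the iterator can also be exercised on its own:

for idx, batch in batch_iterator():
    print(idx, batch.shape)  # 100 batches, each of shape (16..31, 1000)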
Example #8
def main():
    nn.train(n_epochs, learning_rate, train_data, train_labels)
    print_model_accuracy(model_save_path)
    plot_random_data(n_rows, n_cols)
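
main() relies on module-level names defined elsewhere; a hedged sketch of what the surrounding module presumably provides (all values hypothetical):

# Hypothetical module-level configuration implied by main():
n_epochs, learning_rate = 10, 0.01
n_rows, n_cols = 4, 4
model_save_path = 'model.pt'
# nn, train_data, train_labels, print_model_accuracy and plot_random_data are
# likewise assumed to be defined before main() runs.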