Example #1
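A two-layer spiking convnet trained on MNIST with event-driven random backpropagation (eRBP) on the NSAT simulator. The excerpt assumes the NSAT Python bindings and helpers (nsat, LogicalGraphSetup, Population, the connect_* constructors, create_spike_train, test_accuracy) plus numpy as np, time and shutil are imported at module level.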
def erbp_convnet_2L(data_train, data_classify, targets_classify, nepochs=10):
    N_FEAT1 = 16
    N_FEAT2 = 32
    stride = 2
    ksize = 5

    exp_name = '/tmp/mnist_convnet_2L'
    exp_name_test = '/tmp/mnist_convnet_2L_test/'

    inputsize = 28
    Nchannel = 1
    Nxy = inputsize*inputsize
    Nv = Nxy*Nchannel
    Nl = 10
    Nconv1 = Nv//stride//stride*N_FEAT1//Nchannel
    Nconv2 = Nconv1//stride//stride*N_FEAT2//N_FEAT1
    Nh = 100

    t_sample_test = 3000
    t_sample_train = 1500
    N_train = 5000
    N_test = 1000
    test_every = 1
    inp_fact = 25

    sim_ticks = N_train*t_sample_train
    sim_ticks_test = N_test*t_sample_test

    np.random.seed(100)

    wpg = 96
    wgp = 37

    erbp_ptype_ = erbp_ptype.copy()
    erbp_ptype_.rr_num_bits = 12
    erbp_ptype_.hiac = [-7, OFF, OFF]

    erf_ntype_ = erf_ntype.copy()
    erf_ntype_.plasticity_type = [erbp_ptype_, nonplastic_ptype]
    erf_ntype_.Wgain[0] = 2

    net_graph = LogicalGraphSetup()

    pop_data = net_graph.create_population(Population(name='pop_data',
                                                      n=Nv,
                                                      core=-1,
                                                      is_external=True))
    pop_lab = net_graph.create_population(Population(name='pop_lab',
                                                     n=Nl,
                                                     core=-1,
                                                     is_external=True))
    pop_conv1 = net_graph.create_population(Population(name='pop_conv1',
                                                       n=Nconv1,
                                                       core=0,
                                                       neuron_cfg=erf_ntype_))
    pop_conv2 = net_graph.create_population(Population(name='pop_conv2',
                                                       n=Nconv2,
                                                       core=1,
                                                       neuron_cfg=erf_ntype_))
    pop_hid = net_graph.create_population(Population(name='pop_hid',
                                                     n=Nh,
                                                     core=1,
                                                     neuron_cfg=erf_ntype_))
    pop_out = net_graph.create_population(Population(name='pop_out',
                                                     n=Nl,
                                                     core=1,
                                                     neuron_cfg=output_ntype))

    net_graph.create_connection(pop_data, pop_conv1, 0,
                                connect_conv2dbank(inputsize,
                                                   Nchannel,
                                                   N_FEAT1,
                                                   stride,
                                                   ksize))
    net_graph.create_connection(pop_conv1, pop_conv2, 0,
                                connect_conv2dbank(inputsize//stride,
                                                   N_FEAT1,
                                                   N_FEAT2,
                                                   stride,
                                                   ksize))
    net_graph.create_connection(pop_conv2, pop_hid, 0,
                                connect_random_uniform(low=-16, high=16))
    net_graph.create_connection(pop_hid, pop_out, 0,
                                connect_random_uniform(low=-4, high=4))

    pop_err_pos = net_graph.create_population(Population(name='pop_err_pos',
                                                         n=Nl,
                                                         core=0,
                                                         neuron_cfg=error_ntype))
    pop_err_neg = net_graph.create_population(Population(name='pop_err_neg',
                                                         n=Nl,
                                                         core=0,
                                                         neuron_cfg=error_ntype))

    net_graph.create_connection(pop_out, pop_err_pos, 0, connect_one2one(-wpg))
    net_graph.create_connection(pop_out, pop_err_neg, 0, connect_one2one(wpg))

    net_graph.create_connection(pop_lab, pop_err_pos, 0, connect_one2one(wpg))
    net_graph.create_connection(pop_lab, pop_err_neg, 0, connect_one2one(-wpg))

    p, w = connect_shuffle(2000)(pop_err_pos, pop_conv1)
    net_graph.create_connection(pop_err_pos, pop_conv1, 1, [p, +w])
    net_graph.create_connection(pop_err_neg, pop_conv1, 1, [p, -w])

    p, w = connect_shuffle(2000)(pop_err_pos, pop_conv2)
    net_graph.create_connection(pop_err_pos, pop_conv2, 1, [p, +w])
    net_graph.create_connection(pop_err_neg, pop_conv2, 1, [p, -w])

    p, w = connect_shuffle(3000)(pop_err_pos, pop_hid)
    net_graph.create_connection(pop_err_pos, pop_hid, 1, [p, +w])
    net_graph.create_connection(pop_err_neg, pop_hid, 1, [p, -w])

    net_graph.create_connection(pop_err_pos, pop_out, 1, connect_one2one(wgp))
    net_graph.create_connection(pop_err_neg, pop_out, 1, connect_one2one(-wgp))

    setup = net_graph.generate_multicore_setup(NSATSetup)

    spk_rec_mon = [[] for i in range(setup.ncores)]

    cfg_train = setup.create_configuration_nsat(sim_ticks=sim_ticks,
                                                w_check=False,
                                                spk_rec_mon=spk_rec_mon,
                                                monitor_spikes=False,
                                                gated_learning=[True, True],
                                                plasticity_en=[True, True])

    spk_rec_mon = [[] for i in range(setup.ncores)]
    spk_rec_mon[pop_out.core] = pop_out.addr

    cfg_test = cfg_train.copy()
    cfg_test.sim_ticks = sim_ticks_test
    cfg_test.plasticity_en[:] = False
    cfg_test.spk_rec_mon = spk_rec_mon
    cfg_test.monitor_spikes = True

    SL_train = create_spike_train(data_train[:N_train],
                                  t_sample_train,
                                  scaling=inp_fact,
                                  with_labels=True)
    ext_evts_data_train = nsat.exportAER(SL_train)

    SL_test = create_spike_train(data_classify[:N_test],
                                 t_sample_test,
                                 scaling=inp_fact,
                                 with_labels=False)
    ext_evts_data_test = nsat.exportAER(SL_test)

    cfg_test.set_ext_events(ext_evts_data_test)
    cfg_train.set_ext_events(ext_evts_data_train)

    c_nsat_writer_train = nsat.C_NSATWriter(cfg_train,
                                            path=exp_name,
                                            prefix='')
    c_nsat_writer_train.write()

    c_nsat_writer_test = nsat.C_NSATWriter(cfg_test,
                                           path=exp_name_test,
                                           prefix='')
    c_nsat_writer_test.write()

    fname_train = c_nsat_writer_train.fname
    fname_test = c_nsat_writer_test.fname
    c_nsat_reader_test = nsat.C_NSATReader(cfg_test, fname_test)

    pip, total_time = [], []
    t0t, tft = 0, 0
    for i in range(nepochs):
        t0 = time.time()
        nsat.run_c_nsat(fname_train)
        tf = time.time()

        for j in range(setup.ncores):
            # train->test
            shutil.copy(exp_name + '/_shared_mem_core_{0}.dat'.format(j),
                        exp_name_test + '/_wgt_table_core_{0}.dat'.format(j))
            # train->train
            shutil.copy(exp_name + '/_shared_mem_core_{0}.dat'.format(j),
                        exp_name + '/_wgt_table_core_{0}.dat'.format(j))
        if test_every > 0:
            if i % test_every == test_every-1:
                t0t = time.time()
                nsat.run_c_nsat(fname_test)
                tft = time.time()
                acc, slout = test_accuracy(c_nsat_reader_test,
                                           targets=targets_classify[:N_test],
                                           pop=pop_out,
                                           sim_ticks=sim_ticks_test,
                                           duration=t_sample_test)
                pip.append(acc)
        total_time.append(tf - t0 + tft - t0t)
    return pip, total_time
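A minimal driver sketch for the function above. The loader name load_mnist_spiking_data is an assumption for illustration, not part of the source; it stands in for whatever returns the arrays that create_spike_train expects.

# Hypothetical usage sketch; load_mnist_spiking_data is an assumed helper,
# not part of the example above.
data_train, data_classify, targets_classify = load_mnist_spiking_data()
accs, times = erbp_convnet_2L(data_train, data_classify,
                              targets_classify, nepochs=10)
print('test accuracy per epoch: {0}'.format(accs))

Example #2

Training a sparsified VGG-19 on CIFAR-100 with the xRDA optimizer and an l1 proximal term; training accuracy, test accuracy and weight sparsity are printed each epoch.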
def main():
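    # Assumes the original file's module-level imports, e.g.:
    #   import torch, torchvision
    #   import torch.nn as nn
    #   import torchvision.transforms as transforms
    #   from torch.autograd import Variable
    # plus the project's vgg19_bn, xRDA, l1_prox, IterationSpecs and
    # test_accuracy definitions.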
    output_file = 'vgg19_sparse_model_cifar100.dat'

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    trainset = torchvision.datasets.CIFAR100(root='./',
                                             train=True,
                                             download=True,
                                             transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=128,
                                              shuffle=True,
                                              num_workers=6)

    testset = torchvision.datasets.CIFAR100(root='./',
                                            train=False,
                                            download=True,
                                            transform=transform_val)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=2)

    conv_net = vgg19_bn(num_classes=100).cuda()
    conv_net.train()
    criterion = nn.CrossEntropyLoss()

    init_lr = 1.0
    lam = 1e-6
    av_param = 0.0
    training_specs = IterationSpecs(step_size=init_lr,
                                    mom_ts=9.5,
                                    b_mom_ts=9.5,
                                    weight_decay=5e-4,
                                    av_param=av_param)
    optimizer = xRDA(conv_net.parameters(),
                     it_specs=training_specs,
                     prox=l1_prox(lam=lam, maximum_factor=500))

    lr = init_lr
    prev_train_acc = 0
    prev_sparsity = 0
    for epoch in range(500):
        total = 0
        correct = 0
        for data in trainloader:
            # get the inputs
            inputs, labels = data
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = conv_net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Calculate train accuracy
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        train_acc = correct
        sparsity = sum(
            torch.nonzero(x).size()[0] for x in list(conv_net.parameters()))
        accuracy = 10000 * correct / total
        t_accuracy = test_accuracy(testloader, conv_net, cuda=True)
        print(
            'Epoch: %d Training Accuracy: %d.%02d %% Test Accuracy: %d.%02d %% Sparsity: %d'
            % (epoch + 1, accuracy / 100, accuracy % 100, t_accuracy / 100,
               t_accuracy % 100, sparsity))

        # At the epochs below, halve the step size and move the
        # averaging parameter halfway toward 1.
        if epoch in [60, 100, 140, 180, 220, 260, 300, 340, 380, 420]:
            lr /= 2
            training_specs.set_step_size(lr)
            av_param = 1.0 - (1.0 - av_param) / 2.0
            training_specs.set_av_param(av_param)

    # Calculate accuracy and save output.
    final_accuracy = test_accuracy(testloader, conv_net, cuda=True)
    print('Accuracy of the network on the 10000 test images: %d.%02d %%' %
          (final_accuracy / 100, final_accuracy % 100))
    torch.save(conv_net, output_file)
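The excerpt ends without an entry point; a standard guard (assumed, not shown in the source) would run it:

if __name__ == '__main__':
    main()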
Example #3
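A stack of fully connected TFLearn sigmoid layers restored from a saved checkpoint and evaluated with test_accuracy; the training and saving calls are kept commented out.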
# The excerpt begins mid-script: row, col, n_classes, learning_rate,
# model_name, test_x_2 and test_y_2 are defined earlier in the original file.
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

fnn = input_data(shape=[None, row * col], name='input')
fnn = fully_connected(fnn, 2000, activation='sigmoid')
fnn = fully_connected(fnn, 500, activation='sigmoid')
fnn = fully_connected(fnn, 500, activation='sigmoid')
fnn = fully_connected(fnn, 500, activation='sigmoid')
fnn = fully_connected(fnn, 500, activation='sigmoid')

fnn = fully_connected(fnn, n_classes, activation='sigmoid')

fnn = regression(fnn, learning_rate=learning_rate, name='targets')

model = tflearn.DNN(fnn)

# model.fit({'input': X},
#           {'targets': Y},
#           n_epoch=n_epoch,
#           validation_set=({'input': test_x_1}, {'targets': test_y_1}),
#           snapshot_step=500,
#           show_metric=True,
#           run_id='2B')
#
# model.save(model_name)

model.load(model_name)

test_accuracy(model, test_x_2, test_y_2, row, col)
#draw_heatmap_with_test_data(model, num_square, 100, raw_img_data_dir, skip=1)
#draw_heatmap_with_realtime(model, num_square, row, col, block_size)
Example #4
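The CIFAR-10 counterpart of Example #2: the same sparse VGG-19 training loop with the xRDA optimizer, but with a CosineSpecs step-size schedule in place of the manual halving. The same module-level imports are assumed.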
def main():
    output_file = 'vgg19_sparse_model.dat'
    batch_size = 128
    epoch_count = 600

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    trainset = torchvision.datasets.CIFAR10(root='./',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)

    testset = torchvision.datasets.CIFAR10(root='./',
                                           train=False,
                                           download=True,
                                           transform=transform_val)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=2)

    conv_net = vgg19_bn(num_classes=10).cuda()
    conv_net.train()
    criterion = nn.CrossEntropyLoss()

    init_lr = 1.0
    lam = 1e-6
    av_param = 0.0
    training_specs = CosineSpecs(max_iter=math.ceil(50000 / batch_size) *
                                 epoch_count,
                                 init_step_size=init_lr,
                                 mom_ts=10.0,
                                 b_mom_ts=10.0,
                                 weight_decay=5e-4)
    optimizer = xRDA(conv_net.parameters(),
                     it_specs=training_specs,
                     prox=l1_prox(lam=lam, maximum_factor=500))

    lr = init_lr
    prev_train_acc = 0
    prev_sparsity = 0
    for epoch in range(epoch_count):
        total = 0
        correct = 0
        for data in trainloader:
            # get the inputs
            inputs, labels = data
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = conv_net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Calculate train accuracy
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        train_acc = correct
        sparsity = sum(
            torch.nonzero(x).size()[0] for x in list(conv_net.parameters()))
        accuracy = 10000 * correct / total
        t_accuracy = test_accuracy(testloader, conv_net, cuda=True)
        print(
            'Training Accuracy: %d.%02d %% Test Accuracy: %d.%02d %% Sparsity: %d'
            % (accuracy / 100, accuracy % 100, t_accuracy / 100,
               t_accuracy % 100, sparsity))

    # Calculate accuracy and save output.
    final_accuracy = test_accuracy(testloader, conv_net, cuda=True)
    print('Accuracy of the network on the 10000 test images: %d.%02d %%' %
          (final_accuracy / 100, final_accuracy % 100))
    torch.save(conv_net, output_file)
Example #5
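A two-hidden-layer eRBP MLP on MNIST split across two NSAT cores. The structure mirrors Example #1 without the convolutional banks, and the same NSAT imports are assumed.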
def erbp_mlp_2L_multicore(data_train, data_classify, targets_classify,
                          nepochs=10):
    exp_name = '/tmp/mnist_mlp_2L'
    exp_name_test = '/tmp/mnist_mlp_2L_test/'

    inputsize = 28
    Nchannel = 1
    Nxy = inputsize*inputsize
    Nv = Nxy*Nchannel
    Nl = 10
    Nh = 100

    t_sample_test = 3000
    t_sample_train = 1500
    N_train = 500
    N_test = 100
    test_every = 1
    inp_fact = 25

    sim_ticks = N_train*t_sample_train
    sim_ticks_test = N_test*t_sample_test

    np.random.seed(100)

    wpg = 96
    wgp = 37

    net_graph = LogicalGraphSetup()

    pop_data = net_graph.create_population(Population(name='data',
                                                      n=Nv,
                                                      core=-1,
                                                      is_external=True))
    pop_lab = net_graph.create_population(Population(name='lab',
                                                     n=Nl,
                                                     core=-1,
                                                     is_external=True))
    pop_hid1 = net_graph.create_population(Population(name='hid1',
                                                      n=Nh,
                                                      core=0,
                                                      neuron_cfg=erf_ntype))
    pop_hid2 = net_graph.create_population(Population(name='hid2',
                                                      n=Nh,
                                                      core=1,
                                                      neuron_cfg=erf_ntype))
    pop_out = net_graph.create_population(Population(name='out',
                                                     n=Nl,
                                                     core=0,
                                                     neuron_cfg=output_ntype))
    pop_err_pos = net_graph.create_population(Population(name='err_pos',
                                                         n=Nl,
                                                         core=0,
                                                         neuron_cfg=error_ntype))
    pop_err_neg = net_graph.create_population(Population(name='err_neg',
                                                         n=Nl,
                                                         core=0,
                                                         neuron_cfg=error_ntype))

    net_graph.create_connection(pop_data, pop_hid1, 0,
                                connect_random_uniform(low=-16, high=16))
    net_graph.create_connection(pop_hid1, pop_hid2, 0,
                                connect_random_uniform(low=-16, high=16))
    net_graph.create_connection(pop_hid2, pop_out,  0,
                                connect_random_uniform(low=-4, high=4))

    net_graph.create_connection(pop_out, pop_err_pos, 0, connect_one2one(-wpg))
    net_graph.create_connection(pop_out, pop_err_neg, 0, connect_one2one(wpg))

    net_graph.create_connection(pop_lab, pop_err_pos, 0, connect_one2one(wpg))
    net_graph.create_connection(pop_lab, pop_err_neg, 0, connect_one2one(-wpg))

    p, w = connect_shuffle(3000)(pop_err_pos, pop_hid1)

    net_graph.create_connection(pop_err_pos, pop_hid1, 1, [p,  w])
    net_graph.create_connection(pop_err_neg, pop_hid1, 1, [p, -w])

    p, w = connect_shuffle(3000)(pop_err_pos, pop_hid2)

    net_graph.create_connection(pop_err_pos, pop_hid2, 1, [p,  w])
    net_graph.create_connection(pop_err_neg, pop_hid2, 1, [p, -w])

    net_graph.create_connection(pop_err_pos, pop_out, 1, connect_one2one(wgp))
    net_graph.create_connection(pop_err_neg, pop_out, 1, connect_one2one(-wgp))

    setup = net_graph.generate_multicore_setup(NSATSetup)

    spk_rec_mon = [[] for i in range(setup.ncores)]
    spk_rec_mon[pop_out.core] = pop_out.addr

    cfg_train = setup.create_configuration_nsat(sim_ticks=sim_ticks,
                                                w_check=False,
                                                spk_rec_mon=spk_rec_mon,
                                                monitor_spikes=True,
                                                gated_learning=True,
                                                plasticity_en=True)

    spk_rec_mon = [[] for i in range(setup.ncores)]
    spk_rec_mon[pop_out.core] = pop_out.addr

    cfg_test = cfg_train.copy()
    cfg_test.sim_ticks = sim_ticks_test
    cfg_test.plasticity_en[:] = False
    cfg_test.spk_rec_mon = spk_rec_mon
    cfg_test.monitor_spikes = True

    SL_train = create_spike_train(data_train[:N_train],
                                  t_sample_train,
                                  scaling=inp_fact,
                                  with_labels=True)
    ext_evts_data_train = nsat.exportAER(SL_train)

    SL_test = create_spike_train(data_classify[:N_test],
                                 t_sample_test,
                                 scaling=inp_fact,
                                 with_labels=False)
    ext_evts_data_test = nsat.exportAER(SL_test)

    cfg_test.set_ext_events(ext_evts_data_test)
    cfg_train.set_ext_events(ext_evts_data_train)

    c_nsat_writer_train = nsat.C_NSATWriter(cfg_train,
                                            path=exp_name,
                                            prefix='')
    c_nsat_writer_train.write()

    c_nsat_writer_test = nsat.C_NSATWriter(cfg_test,
                                           path=exp_name_test,
                                           prefix='')
    c_nsat_writer_test.write()

    fname_train = c_nsat_writer_train.fname
    fname_test = c_nsat_writer_test.fname

    c_nsat_reader_test = nsat.C_NSATReader(cfg_test, fname_test)

    pip, tt = [], []
    tft, t0t = 0, 0
    for i in range(nepochs):
        t0 = time.time()
        nsat.run_c_nsat(fname_train)
        tf = time.time()

        for j in range(setup.ncores):
            # train->test
            shutil.copy(exp_name + '/_shared_mem_core_{0}.dat'.format(j),
                        exp_name_test + '/_wgt_table_core_{0}.dat'.format(j))
            # train->train
            shutil.copy(exp_name + '/_shared_mem_core_{0}.dat'.format(j),
                        exp_name + '/_wgt_table_core_{0}.dat'.format(j))
        if test_every > 0:
            if i % test_every == test_every-1:
                t0t = time.time()
                nsat.run_c_nsat(fname_test)
                tft = time.time()
                acc, slout = test_accuracy(c_nsat_reader_test,
                                           targets=targets_classify[:N_test],
                                           pop=pop_out,
                                           sim_ticks=sim_ticks_test,
                                           duration=t_sample_test)
                pip.append(acc)
        t_total = (tf - t0) + (tft - t0t)
        tt.append(t_total)
    return pip, tt
Example #6
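The tail of an HMM part-of-speech tagger: the Viterbi backtrace through the backpointer table, and the train method that counts occurrences and estimates the emission P(w|t) and transition P(ti|ti-1) probabilities. Assumes numpy and the rest of the HMM class, plus read_train_data and test_accuracy, from the original file.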
        # Work back in time through the backpointers to get the best sequence
        predicted_tags = [None for x in range(len(S))]
        predicted_tags[0] = self.tags[numpy.argmax(T[0, :])]
        predicted_tags[-1] = self.tags[numpy.argmax(T[-1, :])]
        for i in range(len(S) - 2, 0, -1):
            ind = numpy.argmax(T[i, :])
            tag = self.tags[int(backpointers[i + 1, ind])]
            predicted_tags[i] = tag
        return predicted_tags

    def train(self, sentences, targets):
        # Get word-counts in the training set
        self.get_word_counts(sentences)
        # Use counts to replace infrequent words with UNK
        if self.handle_unks:
            self.replace_UNK(sentences, self.unk_freq)
        # Get counts to compute transition and emission probs later
        self.compute_counts(sentences, targets)
        # Compute P(w|t) and P(ti|ti-1)
        self.estimate_params()
        self.tags = list(self.pos_unigram_counts.keys())
        self.tags.remove("<START>")


if __name__ == "__main__":
    train_sentences, train_targets = read_train_data('train.txt')
    test_sentences, test_targets = read_train_data('test.txt')
    hmm = HMM(unk_freq=1)
    hmm.train(train_sentences, train_targets)
    test_accuracy(hmm, test_sentences, test_targets)
Example #7
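A most-frequent-tag baseline for part-of-speech tagging: each word is tagged with its most common training tag, and unseen words fall back to the most frequent tag overall. The excerpt begins inside the count_occurrences method.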
                        self.word_tag_counts[word][tag] = 1
                else:
                    self.word_tag_counts[word] = {tag: 1}
                self.tag_frequencies[tag] = self.tag_frequencies.get(tag, 0.0) + 1

    def train(self, sentences, targets):
        self.count_occurrences(sentences, targets)
        for word in self.word_tag_counts:
            tag_frequencies = self.word_tag_counts[word]
            most_frequent = sorted(tag_frequencies.items(), key=lambda item: item[1])[-1]
            self.most_frequent_tag[word] = most_frequent[0]
        self.top_tag = sorted(self.tag_frequencies.items(), key=lambda item: item[1])[-1][0]


    def predict(self, sentence):
        predicted_tags = []
        for word in sentence:
            if word in self.most_frequent_tag:
                tag = self.most_frequent_tag[word]
            else:
                tag = self.top_tag
            predicted_tags.append(tag)
        return predicted_tags

if __name__ == "__main__":
    train_sentences, train_targets = read_train_data('train.txt')
    test_sentences, test_targets = read_train_data('test.txt')
    baseline = FrequencyBaseline()
    baseline.train(train_sentences, train_targets)
    test_accuracy(baseline, test_sentences, test_targets)
Example #8
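The tail of a contrastive-learning training script. The model, the positive/negative pair arrays, the images, run_generator, train, contrastive_loss, predict and the dimensions H and W are all defined earlier in the original file.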
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())

    train_steps = int(np.ceil(len(train_positives) / batch_size))
    test_steps = int(np.ceil(len(test_positives) / batch_size))

    # training loop
    train_losses, test_losses = train(model,
                                      contrastive_loss,
                                      optimizer,
                                      run_generator(train_positives,
                                                    train_negatives,
                                                    train_images),
                                      run_generator(test_positives,
                                                    test_negatives,
                                                    test_images,
                                                    train=False),
                                      train_steps,
                                      test_steps,
                                      epochs=20)

    # Evaluate
    test_accuracy(H,
                  W,
                  test_images,
                  test_positives,
                  test_negatives,
                  predict,
                  threshold=0.7)
Example #9
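A loop that trains and evaluates one model per dataset. Assumes edd (a dataset loader), nn (a module providing preprocessing, model, compile_args and fit_args) and the train_plots, roc_auc and test_f1_score helpers are defined elsewhere in the original file.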
for ds in datasets:

    X_train, y_train = edd.load(ds,
                                dataset='train',
                                cache_dir='./' + ds,
                                cache_subdir='datasets')
    X_test, y_test = edd.load(ds,
                              dataset='test',
                              cache_dir='./' + ds,
                              cache_subdir='datasets')

    x_train = nn.preprocessing(X_train[0])
    x_test = nn.preprocessing(X_test[0])

    model = nn.model(ds, shape=x_train.shape[1:])
    model.compile(**nn.compile_args)
    history = model.fit(x=x_train, y=y_train, **nn.fit_args)

    ##  From here on, one should be able to use the already defined methods,
    ##  as shown in the following lines. Let us know if you face any issues.

    # training history plots
    train_plots(history, ds, True)

    # evaluation plots and scores
    y_pred = model.predict(x_test).ravel()  # use the preprocessed test inputs
    roc_auc(y_pred, y_test, ds, True)
    test_accuracy(y_pred, y_test, ds)
    test_f1_score(y_pred, y_test, ds)