def __init__(self, args):
    super(Model, self).__init__()

    # Two feature extractors: an SE-ResNet-18 backbone and a plain convnet.
    self.se_resnet = se_resnet18()
    self.convnet = convnet()

    # Fuse the 256-d features, then classify into 199 classes.
    self.fc_in = torch.nn.Linear(256, 256)
    self.fc_out = torch.nn.Linear(256, 199)
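The forward pass is not part of this snippet. A minimal sketch of how the pieces might connect, assuming se_resnet18 and convnet each emit a 128-d feature vector that gets concatenated (the 256-d input of fc_in is the only hint; this is an assumption, not the project's actual forward):

def forward(self, x):
    # Assumed fusion: concatenate two 128-d feature vectors into 256-d.
    a = self.se_resnet(x)    # assumed shape: (batch, 128)
    b = self.convnet(x)      # assumed shape: (batch, 128)
    h = torch.relu(self.fc_in(torch.cat([a, b], dim=1)))
    return self.fc_out(h)    # logits over 199 classes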
Example #2
def fit_convnet(cell, stimulus_type):
    """Demo code for fitting a convnet model."""

    # initialize model
    mdl = convnet(cell, stimulus_type, num_filters=(8, 16), filter_size=(13, 13),
                  weight_init='normal', l2_reg=0.01, mean_adapt=False)

    # train
    batchsize = 5000            # number of samples per batch
    num_epochs = 10             # number of epochs to train for
    save_weights_every = 50     # save weights every n iterations

    mdl.train(batchsize, num_epochs=num_epochs, save_every=save_weights_every)

    return mdl
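A hypothetical call, with placeholder arguments (the real cell IDs and stimulus types come from the surrounding project and are not shown here):

mdl = fit_convnet(cell='cell-01', stimulus_type='whitenoise')  # placeholders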
Example #3
    def _build_model(self):
        self.n_color_cls = 8

        # Main classifier plus one color predictor per RGB channel, each over
        # 8 quantized color classes, consuming 32-channel intermediate features.
        self.net = models.convnet(num_classes=self.option.n_class)
        self.pred_net_r = models.Predictor(input_ch=32, num_classes=self.n_color_cls)
        self.pred_net_g = models.Predictor(input_ch=32, num_classes=self.n_color_cls)
        self.pred_net_b = models.Predictor(input_ch=32, num_classes=self.n_color_cls)

        # Labels equal to 255 are ignored by both losses.
        self.loss = nn.CrossEntropyLoss(ignore_index=255)
        self.color_loss = nn.CrossEntropyLoss(ignore_index=255)

        if self.option.cuda:
            self.net.cuda()
            self.pred_net_r.cuda()
            self.pred_net_g.cuda()
            self.pred_net_b.cuda()
            self.loss.cuda()
            self.color_loss.cuda()
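How the color predictors plug into training is not shown. A rough sketch, assuming net returns both the class logits and the 32-channel feature map the predictors consume (that interface is an assumption):

def _compute_losses(self, images, labels, color_labels):
    # Assumed interface: net returns (logits, 32-channel feature map).
    logits, feat = self.net(images)
    loss_cls = self.loss(logits, labels)
    r, g, b = color_labels
    loss_color = (self.color_loss(self.pred_net_r(feat), r) +
                  self.color_loss(self.pred_net_g(feat), g) +
                  self.color_loss(self.pred_net_b(feat), b))
    return loss_cls, loss_color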
Example #5
if __name__ == '__main__':

    # Let's load and process the dataset
    import numpy as np
    from fuel.datasets.dogs_vs_cats import DogsVsCats

    from fuel.streams import DataStream
    from fuel.schemes import ShuffledScheme
    from fuel.transformers.image import RandomFixedSizeCrop
    from fuel.transformers import Flatten

    # Load small 20-example subsets for a quick smoke test
    # (use e.g. subset=slice(0, 20000) for a real training run)
    train = DogsVsCats(('train',), subset=slice(0, 20))
    test = DogsVsCats(('test',), subset=slice(0, 20))
    input_size = (150, 150)

    from models import mlp, convnet

    # Alternative: flatten the images and fit an MLP instead
    # main(None, mlp(input_size[0] * input_size[1] * 3), train, test, num_epochs=1,
    #      input_size=input_size, batch_size=5, num_batches=20, flatten_stream=True)
    main("test1.txt", convnet(input_size), train, test, num_epochs=1,
         input_size=input_size, batch_size=64, num_batches=100)

    # Alternative model:
    #     from deep_res import build_cnn
    #     model = build_cnn(x, 3, 64)

    # To run on the GPU with Theano, e.g.:
    #     THEANO_FLAGS=cuda.root=/usr/lib/nvidia-cuda-toolkit/,device=gpu,floatX=float32 python dogs_cats.py
    # For debugging:
    #     THEANO_FLAGS=exception_verbosity=high,optimizer=None
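The stream imports (DataStream, ShuffledScheme, RandomFixedSizeCrop, Flatten) go unused in this excerpt; presumably main wires them up. For reference, a typical fuel pipeline for this dataset looks roughly like this (the batch size and source name are illustrative, not taken from the original):

stream = DataStream.default_stream(
    train, iteration_scheme=ShuffledScheme(train.num_examples, batch_size=64))
stream = RandomFixedSizeCrop(stream, input_size,
                             which_sources=('image_features',))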
Example #6
    test_data = QD_Dataset(mtype="test", root=args.data_root)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_bs,
                                              shuffle=True)

    num_classes = train_data.get_number_classes()

    print("Train images number: %d" % len(train_data))
    print("Test images number: %d" % len(test_data))

    net = None
    if args.model == 'resnet34':
        net = resnet34(num_classes)
    elif args.model == 'convnet':
        net = convnet(num_classes)
    else:
        raise ValueError("unknown model: %s" % args.model)

    if args.ngpu > 1:
        net = nn.DataParallel(net)

    if args.ngpu > 0:
        net.cuda()

    print(net)

    optimizer = torch.optim.SGD(net.parameters(),
                                lr=state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['weight_decay'])

    Train_Loss = []
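Train_Loss is presumably populated by the training loop that follows in the full script. A minimal sketch of such a loop (the criterion, the args.epochs flag, and the train_loader name are assumptions inferred from the surrounding code):

criterion = nn.CrossEntropyLoss()
for epoch in range(args.epochs):                  # args.epochs: assumed flag
    epoch_loss = 0.0
    for images, targets in train_loader:          # assumed counterpart of test_loader
        if args.ngpu > 0:
            images, targets = images.cuda(), targets.cuda()
        optimizer.zero_grad()
        loss = criterion(net(images), targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    Train_Loss.append(epoch_loss / len(train_loader))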
Example #7
                    Img[i][j] = [1, 1, 1]
                else:
                    # Invert the grayscale stroke value into [0, 1]
                    v = 1 - canvas[i * shape + j] / 255
                    Img[i][j] = [v, v, v]
        plt.imshow(Img)
        plt.title(name)
        plt.savefig("./" + root + "/ans/" + str(idx) + "_" + name + ".png")

    net = None
    if args.model == 'resnet34':
        net = resnet34(args.num_classes)
    elif args.model == 'convnet':
        net = convnet(args.num_classes)
    else:
        raise ValueError("unknown model: %s" % args.model)

    if args.ngpu > 1:
        net = nn.DataParallel(net)

    if args.ngpu > 0:
        net.cuda()

    net.load_state_dict(torch.load('./' + args.model_file_name))
    print("Model loaded, start evaluating.")

    generate_eval_dataset()

    eval_data = QD_Dataset(mtype="eval", root="Dataset")
    eval_loader = torch.utils.data.DataLoader(eval_data,
                                              batch_size=args.eval_bs)
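The snippet cuts off here. A hedged sketch of the evaluation loop that would plausibly follow (the batch format is an assumption):

net.eval()
with torch.no_grad():
    for idx, (images, _) in enumerate(eval_loader):   # assumed (image, label) batches
        if args.ngpu > 0:
            images = images.cuda()
        preds = net(images).argmax(dim=1)
        # render each prediction with the plotting helper shown above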
Example #8
if __name__ == '__main__':
    print('PAC 2018')

    example = data_dir + subjects[0]['id'] + '.nii'
    image_size = nib.load(example).shape

    print('Image size:', image_size)

    features = np.zeros((len(subjects), 4))
    labels = np.zeros((len(subjects), 2))

    for i, sub in enumerate(subjects):
        features[i, 0] = sub['tiv']
        features[i, 1] = sub['site']
        features[i, 2] = sub['gender']
        features[i, 3] = sub['age']

        # One-hot encode the binary label
        if sub['label'] == 1:
            labels[i, :] = [1, 0]
        else:
            labels[i, :] = [0, 1]

    f = hdf5_smash(subjects)

    model = convnet(image_size)
    # NOTE: with a plain Python generator, fit_generator needs an explicit
    # steps_per_epoch, and validation_data must be a generator or an
    # (inputs, targets) tuple -- the original `validation_data=0.1` would fail.
    model.fit_generator(batch_gen(f, labels, features),
                        steps_per_epoch=len(subjects),  # assumes one batch per subject
                        epochs=10,
                        shuffle=False)
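batch_gen is defined elsewhere in the project. A hypothetical sketch with the assumed signature (the HDF5 'images' dataset key, the two-input model, and the batch size are all assumptions):

import numpy as np

def batch_gen(f, labels, features, batch_size=8):
    """Hypothetical sketch -- the real batch_gen is not part of this snippet."""
    n = labels.shape[0]
    while True:                                   # Keras generators loop forever
        idx = sorted(np.random.choice(n, batch_size, replace=False))
        imgs = f['images'][idx]                   # 'images' dataset key is assumed
        yield [imgs, features[idx]], labels[idx]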