def Inference(in_op, num_labels):
    # Two conv blocks with max-pooling, followed by a small fully connected head.
    conv1 = Conv(in_op, 'conv1', 3, 1, 80)
    conv2 = Conv(conv1, 'conv2', 3, 1, 64)
    pool1 = Maxpool(conv2, 'pool1', 2, 2)

    conv3 = Conv(pool1, 'conv3', 3, 1, 64)
#    conv4 = Conv(conv3, 'conv4', 3, 1, 64)
    pool2 = Maxpool(conv3, 'pool2', 2, 2)

    # Deeper variants kept commented out for reference:
#    conv5 = Conv(pool2, 'conv5', 3, 1, 32)
#    conv6 = Conv(conv5, 'conv6', 3, 1, 32)
#    pool3 = Maxpool(conv6, 'pool3', 2, 2)
#
#    conv7 = Conv(pool3, 'conv7', 3, 1, 256)
#    conv8 = Conv(conv7, 'conv8', 3, 1, 256)
#    pool4 = Maxpool(conv8, 'pool4', 2, 2)
#
#    conv9 = Conv(pool4, 'conv9', 3, 1, 128)
#    conv10 = Conv(conv9, 'conv10', 3, 1, 128)
#    pool5 = Maxpool(conv10, 'pool5', 2, 2)

    flat  = Flatten(pool2)
    fc1   = Fc(flat, 'fc1', 128, activation='relu')
#    drop1 = tf.nn.dropout(fc1, 0.75)
#    fc2   = Fc(drop1, 'fc2', 32)
#    drop2 = tf.nn.dropout(fc2, 0.5)
    logit = Fc(fc1, 'logit', num_labels)
    return logit
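
A minimal usage sketch (an addition, not part of the original example; it assumes the custom Conv/Maxpool/Fc/Flatten helpers are in scope and TF1-style graph execution):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

images = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3])  # hypothetical input shape
logits = Inference(images, num_labels=10)
probs = tf.nn.softmax(logits)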
Example #2
    def __init__(self, nstack, inp_dim, oup_dim, bn=False, increase=0):
        super(PoseNet, self).__init__()

        self.nstack = nstack
        self.pre = nn.Sequential(
            Conv(3, 64, 7, 2, bn=True, relu=True),
            Residual(64, 128),
            Pool(2, 2),
            Residual(128, 128),
            Residual(128, inp_dim)
        )

        self.hgs = nn.ModuleList([
            nn.Sequential(
                Hourglass(4, inp_dim, bn, increase),
            ) for i in range(nstack)
        ])

        self.features = nn.ModuleList([
            nn.Sequential(
                Residual(inp_dim, inp_dim),
                Conv(inp_dim, inp_dim, 1, bn=True, relu=True)
            ) for i in range(nstack)
        ])

        self.outs = nn.ModuleList(
            [Conv(inp_dim, oup_dim, 1, relu=False, bn=False) for i in range(nstack)])
        self.merge_features = nn.ModuleList(
            [Merge(inp_dim, inp_dim) for i in range(nstack - 1)])
        self.merge_preds = nn.ModuleList(
            [Merge(oup_dim, inp_dim) for i in range(nstack - 1)])
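
A hypothetical instantiation check (not in the original; assumes Conv, Residual, Pool, Hourglass, and Merge from the same project are importable):

net = PoseNet(nstack=2, inp_dim=256, oup_dim=16)
num_params = sum(p.numel() for p in net.parameters())  # rough model-size check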
Example #3
    def __init__(self, nstack=1, inp_dim=256, oup_dim=6, bn=False, increase=0):
        super(DiscConfNet, self).__init__()

        self.nstack = nstack
        self.pre = nn.Sequential(
            Conv(6, 64, 7, 2, bn=True,
                 relu=True),  # input channels: 6 (earlier versions used 3)
            Residual(64, 128),
            Pool(2, 2),
            Residual(128, 128),
            Residual(128, inp_dim))

        self.hgs = nn.ModuleList([
            nn.Sequential(Hourglass(4, inp_dim, bn, increase), )
            for i in range(nstack)
        ])

        self.features = nn.ModuleList([
            nn.Sequential(Residual(inp_dim, inp_dim),
                          Conv(inp_dim, inp_dim, 1, bn=True, relu=True))
            for i in range(nstack)
        ])

        self.outs = nn.ModuleList([
            Conv(inp_dim, oup_dim, 1, relu=False, bn=False)
            for i in range(nstack)
        ])
        self.merge_features = nn.ModuleList(
            [Merge(inp_dim, inp_dim) for i in range(nstack - 1)])
        self.merge_preds = nn.ModuleList(
            [Merge(oup_dim, inp_dim) for i in range(nstack - 1)])
        self.fc1 = nn.Linear(64 * 64 * 6, 6)
Example #4
 def __init__(self,
              nstack=8,
              layer=4,
              in_channel=256,
              out_channel=16,
              increase=0):
     super(PoseNet, self).__init__()
     self.nstack = nstack
     self.pre = nn.Sequential(Conv(3, 64, 7, 2, bn=True, relu=True),
                              Residual(64, 128), nn.MaxPool2d(2, 2),
                              Residual(128, 128), Residual(128, in_channel))
     self.hourglass = nn.ModuleList([
         nn.Sequential(Hourglass(layer, in_channel, inc=increase))
         for _ in range(nstack)
     ])
     self.feature = nn.ModuleList([
         nn.Sequential(Residual(in_channel, in_channel),
                       Conv(in_channel, in_channel, 1, bn=True, relu=True))
         for _ in range(nstack)
     ])
     self.outs = nn.ModuleList([
         Conv(in_channel, out_channel, 1, bn=False, relu=False)
         for _ in range(nstack)
     ])
     self.merge_feature = nn.ModuleList(
         [Convert(in_channel, in_channel) for _ in range(nstack - 1)])
     self.merge_pred = nn.ModuleList(
         [Convert(out_channel, in_channel) for _ in range(nstack - 1)])
Example #5
    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        # self.fc1 = Linear(28*28, 25, noise_std=1e-0, device=self.device)
        self.fc1 = Conv(1, 25, kernel_size=25, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.fc2 = Linear(16*25, 10, noise_std=1e-0, device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.fc1, self.fc2]
Example #6
 def __init__(self, in_channels, growth_rate, args):
     super(_DenseLayer, self).__init__()
     self.group_1x1 = args.group_1x1
     self.group_3x3 = args.group_3x3
     ### 1x1 conv i --> b*k
     self.conv_1 = Conv(in_channels, args.bottleneck * growth_rate,
                        kernel_size=1, groups=self.group_1x1)
     ### 3x3 conv b*k --> k
     self.conv_2 = Conv(args.bottleneck * growth_rate, growth_rate,
                        kernel_size=3, padding=1, groups=self.group_3x3)
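
The forward pass is not shown in this excerpt; a typical DenseNet-style forward for such a layer would look like the following sketch (an assumption based on the 1x1/3x3 comments above):

import torch

def dense_forward(layer, x):
    out = layer.conv_1(x)          # 1x1: in_channels -> bottleneck * growth_rate
    out = layer.conv_2(out)        # 3x3: bottleneck * growth_rate -> growth_rate
    return torch.cat([x, out], 1)  # concatenate the new features onto the input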
Example #7
    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.pool1 = Pool(2, device=self.device)

        self.fc1 = Linear(6*12*12, 100, noise_std=1e-0, act='TanH', device=self.device)
        self.act2 = Activation('TanH')
        self.fc2 = Linear(100, 10, noise_std=1e-0, act='TanH', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.fc1, self.fc2]
Example #8
class ShallowConvNet(Net):

    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.pool1 = Pool(2, device=self.device)

        self.fc1 = Linear(6*12*12, 100, noise_std=1e-0, act='TanH', device=self.device)
        self.act2 = Activation('TanH')
        self.fc2 = Linear(100, 10, noise_std=1e-0, act='TanH', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.fc1, self.fc2]

    def forward(self, input):
        conv_out_1 = self.conv1.forward(input)
        act_out_1 = self.act1.forward(conv_out_1)
        pool_out_1 = self.pool1.forward(act_out_1)

        pool_out_1 = pool_out_1.reshape(len(pool_out_1), -1)

        fc_out_1 = self.fc1.forward(pool_out_1)
        act_out_2 = self.act2.forward(fc_out_1)
        fc_out_2 = self.fc2.forward(act_out_2)
        output = self.softmax.forward(fc_out_2)

        return output
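
A hedged smoke test for the class above (the 6*12*12 input of fc1 implies 28x28 inputs: 28 - 5 + 1 = 24, pooled to 12; assumes the Net/Conv/Pool/Linear/Activation framework accepts CPU tensors):

import torch

net = ShallowConvNet(device=torch.device('cpu'))
probs = net.forward(torch.randn(8, 1, 28, 28))  # expected output shape: (8, 10)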
Example #9
 def __init__(self, in_channels, out_channels, args):
     super(_Transition, self).__init__()
     self.conv = Conv(in_channels,
                      out_channels,
                      kernel_size=1,
                      groups=args.group_1x1)
     self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
Example #10
 def __init__(self, in_channels, growth_rate, args):
     super(_DenseLayer, self).__init__()
     self.group_1x1 = args.group_1x1
     self.group_3x3 = args.group_3x3
     ### 1x1 conv i --> b*k
     #self.conv_1 = CSDN_Tem(in_channels, growth_rate, dropout=args.dropout_rate)
     ### 3x3 conv b*k --> k
     self.conv_2 = Conv(in_channels, growth_rate, dropout=args.dropout_rate)
Example #11
    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act1 = Activation('ReLU')
        self.pool1 = Pool(2, device=self.device)

        self.conv2 = Conv(6, 16, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act2 = Activation('ReLU')
        self.pool2 = Pool(2, device=self.device)

        self.fc1 = Linear(256, 120, noise_std=1e-0, act='ReLU', device=self.device)
        self.act3 = Activation('ReLU')
        self.fc2 = Linear(120, 84, noise_std=1e-0, act='ReLU', device=self.device)
        self.act4 = Activation('ReLU')
        self.fc3 = Linear(84, 10, noise_std=1e-0, act='ReLU', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.conv2, self.fc1, self.fc2, self.fc3]
Example #12
def main():
    c = color_codes()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    try:
        net = load_model('/home/mariano/Desktop/test.tf')
    except IOError:
        x = Input([784])
        x_image = Reshape([28, 28, 1])(x)
        x_conv1 = Conv(filters=32,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(x_image)
        h_pool1 = MaxPool((2, 2), padding='same')(x_conv1)
        h_conv2 = Conv(filters=64,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(h_pool1)
        h_pool2 = MaxPool((2, 2), padding='same')(h_conv2)
        h_fc1 = Dense(1024, activation='relu')(h_pool2)
        h_drop = Dropout(0.5)(h_fc1)
        y_conv = Dense(10)(h_drop)

        net = Model(x,
                    y_conv,
                    optimizer='adam',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + c['b'] +
          'Original (MNIST)' + c['nc'] + c['g'] + ' net ' + c['nc'] + c['b'] +
          '(%d parameters)' % net.count_trainable_parameters() + c['nc'])

    net.fit(mnist.train.images,
            mnist.train.labels,
            val_data=mnist.test.images,
            val_labels=mnist.test.labels,
            patience=10,
            epochs=200,
            batch_size=1024)

    save_model(net, '/home/mariano/Desktop/test.tf')
Example #13
class LeNet5(Net):

    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act1 = Activation('ReLU')
        self.pool1 = Pool(2, device=self.device)

        self.conv2 = Conv(6, 16, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act2 = Activation('ReLU')
        self.pool2 = Pool(2, device=self.device)

        self.fc1 = Linear(256, 120, noise_std=1e-0, act='ReLU', device=self.device)
        self.act3 = Activation('ReLU')
        self.fc2 = Linear(120, 84, noise_std=1e-0, act='ReLU', device=self.device)
        self.act4 = Activation('ReLU')
        self.fc3 = Linear(84, 10, noise_std=1e-0, act='ReLU', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.conv2, self.fc1, self.fc2, self.fc3]

    def forward(self, input):
        conv_out_1 = self.conv1.forward(input)
        act_out_1 = self.act1.forward(conv_out_1)
        pool_out_1 = self.pool1.forward(act_out_1)

        conv_out_2 = self.conv2.forward(pool_out_1)
        act_out_2 = self.act2.forward(conv_out_2)
        pool_out_2 = self.pool2.forward(act_out_2)

        pool_out_2 = pool_out_2.reshape(len(pool_out_2), -1)

        fc_out_1 = self.fc1.forward(pool_out_2)
        act_out_3 = self.act3.forward(fc_out_1)
        fc_out_2 = self.fc2.forward(act_out_3)
        act_out_4 = self.act4.forward(fc_out_2)
        fc_out_3 = self.fc3.forward(act_out_4)
        output = self.softmax.forward(fc_out_3)

        return output
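
Shape check for LeNet5 above: 28 -> conv5 -> 24 -> pool2 -> 12 -> conv5 -> 8 -> pool2 -> 4, so the flattened size is 16 * 4 * 4 = 256, matching fc1. A hypothetical smoke test under the same assumptions as the previous examples:

import torch

net = LeNet5(device=torch.device('cpu'))
out = net.forward(torch.randn(4, 1, 28, 28))  # expected output shape: (4, 10)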
Example #14
def make_cnn(X_dim, num_class):
    conv = Conv(X_dim,
                n_filter=16,
                h_filter=5,
                w_filter=5,
                stride=1,
                padding=2)
    relu = ReLU()
    maxpool = Maxpool(conv.out_dim, size=2, stride=2)
    conv2 = Conv(maxpool.out_dim,
                 n_filter=20,
                 h_filter=5,
                 w_filter=5,
                 stride=1,
                 padding=2)
    relu2 = ReLU()
    maxpool2 = Maxpool(conv2.out_dim, size=2, stride=2)
    flat = Flatten()
    fc = FullyConnected(np.prod(maxpool2.out_dim), num_class)

    return [conv, relu, maxpool, conv2, relu2, maxpool2, flat, fc]
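
A minimal usage sketch (hypothetical; the exact X_dim format and the training-loop API depend on the surrounding framework):

layers = make_cnn(X_dim=(1, 28, 28), num_class=10)  # assumed (C, H, W) input format
# each returned layer exposes its own forward/backward; chaining is left to the caller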
Example #15
 def __init__(self, in_channels, growth_rate, args):
     super(_DenseLayer, self).__init__()
     self.group_1x1 = args.group_1x1
     self.group_3x3 = args.group_3x3
     ### 1x1 conv i --> b*k
     self.conv_1 = LearnedGroupConv(in_channels, args.bottleneck * growth_rate,
                                    kernel_size=1, groups=self.group_1x1,
                                    condense_factor=args.condense_factor,
                                    dropout_rate=args.dropout_rate)
     ### 3x3 conv b*k --> k
     self.conv_2 = Conv(args.bottleneck * growth_rate, growth_rate,
                        kernel_size=3, padding=1, groups=self.group_3x3)
Example #16
    def build_model():
        tf.compat.v1.reset_default_graph()
        x = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3]) 
        t = tf.compat.v1.placeholder(tf.float32, [None, 10]) 
        is_training = tf.compat.v1.placeholder(tf.bool)
        layers = [
            Conv((3, 3, 3, 64), tf.nn.relu),
            Conv((3, 3, 64, 64), tf.nn.relu),
            Pooling((1, 2, 2, 1)),
            Conv((3, 3, 64, 128), tf.nn.relu),
            Conv((3, 3, 128, 128), tf.nn.relu),
            Pooling((1, 2, 2, 1)),
            Flatten(),
            Dense(3200, 256, tf.nn.relu),
            Dense(256, 256, tf.nn.relu),
            Dense(256, 10, tf.nn.softmax)
        ]
        y = f_props(layers, x)
        
        params = get_params(layers)
        cost = - tf.reduce_mean(tf.reduce_sum(t * tf_log(y), axis=1))

        return x, t, is_training, y, cost, params
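
A minimal TF1 session sketch for the graph above (an addition; batching and the feed values are elided):

x, t, is_training, y, cost, params = build_model()
train_op = tf.compat.v1.train.AdamOptimizer().minimize(cost, var_list=params)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # sess.run([train_op, cost], feed_dict={x: x_batch, t: t_batch, is_training: True})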
Example #17
def inner_model(trainable, x):
    layers_list = [
        Reshape([-1, 28, 28, 1]),
        Conv(32),
        BatchNormalization(),
        Relu(),
        MaxPool(),
        Conv(64),
        BatchNormalization(),
        Relu(),
        MaxPool(),
        Reshape([-1, 7 * 7 * 64]),
        FullyConnected(1024),
        Relu(),
        FullyConnected(10)
    ]
    variable_saver = VariableSaver()
    signal = x
    print('shape', signal.get_shape())
    for idx, layer in enumerate(layers_list):
        signal = layer.contribute(signal, idx, trainable,
                                  variable_saver.save_variable)
        print('shape', signal.get_shape())
    return signal, variable_saver.var_list
Example #18
    def __init__(self, in_channels, growth_rate, args):
        super(_DenseLayer, self).__init__()
        ### 1x1 conv: i --> bottleneck * k
        self.conv_1 = DynamicMultiHeadConv(in_channels,
                                           args.bottleneck * growth_rate,
                                           kernel_size=1,
                                           heads=args.heads,
                                           squeeze_rate=args.squeeze_rate,
                                           gate_factor=args.gate_factor)

        ### 3x3 conv: bottleneck * k --> k
        self.conv_2 = Conv(args.bottleneck * growth_rate,
                           growth_rate,
                           kernel_size=3,
                           padding=1,
                           groups=args.group_3x3)
Example #19
 def __init__(self, in_channels, out_channels, args, width, height):
     super(_Transition, self).__init__()
     self.conv = Conv(in_channels,
                      out_channels,
                      kernel_size=1,
                      groups=args.group_1x1)
     padding = 0
     if args.kernel_size == 3:
         padding = 1
     self.pool = cmaxgb(out_channels,
                        out_channels,
                        args=args,
                        kernel_size=args.kernel_size,
                        stride=2,
                        padding=padding,
                        width=width,
                        height=height)
Example #20
class DenseNet_CNN(Net):

    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        # self.fc1 = Linear(28*28, 25, noise_std=1e-0, device=self.device)
        self.fc1 = Conv(1, 25, kernel_size=25, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.fc2 = Linear(16*25, 10, noise_std=1e-0, device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.fc1, self.fc2]

    def forward(self, input):
        #input = input.reshape(len(input), -1)

        fc_out_1 = self.fc1.forward(input)
        act_out_1 = self.act1.forward(fc_out_1)

        act_out_1 = act_out_1.reshape(len(act_out_1), -1)

        fc_out_2 = self.fc2.forward(act_out_1)
        output = self.softmax.forward(fc_out_2)
        return output
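
Shape note: a 25x25 kernel over a 28x28 input leaves a 4x4 map with 25 channels, so the flattened activation is 25 * 4 * 4 = 400 = 16 * 25, matching fc2. A hypothetical smoke test:

import torch

net = DenseNet_CNN(device=torch.device('cpu'))
out = net.forward(torch.randn(2, 1, 28, 28))  # expected output shape: (2, 10)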
Example #21
    def __init__(self, in_channels, out_channels, args):
        super(_Transition, self).__init__()
        padding = 0
        if args.kernel_size == 3:
            padding = 1

        if args.convs:
            if args.no1x1:
                # purely 3x3 conv stride 2
                self.conv = Conv(in_channels, out_channels, 
                    kernel_size=args.kernel_size, padding=padding, stride=2)
                self.pool = nn.Sequential()
            elif args.dw:
                # depthwise separable convolutions
                self.conv = Conv(in_channels, in_channels, kernel_size=args.kernel_size, 
                    padding=padding, groups=in_channels, stride=2)
                #self.pool = Conv(in_channels, out_channels, kernel_size=1)
                self.pool = nn.Conv2d(in_channels, out_channels,
                    kernel_size=1, bias=True) #adding bias because no BN before
            else:
                # 1x1 into 3x3 conv stride 2
                self.conv = Conv(in_channels, out_channels, 
                    kernel_size=1, padding=padding, groups=args.group_1x1)
                #self.pool = Conv(out_channels, out_channels, kernel_size=args.kernel_size, 
                #    padding=padding, stride=2)
                self.pool = nn.Conv2d(out_channels, out_channels, kernel_size=args.kernel_size, 
                    padding=padding, stride=2, bias=True) #adding bias because no BN before
        elif args.dw:
            # 1x1 into 3x3 conv stride 2 dw
            self.conv = Conv(in_channels, out_channels, kernel_size=1)
            # self.pool = Conv(out_channels, out_channels, kernel_size=args.kernel_size, 
                # padding=padding, stride=2, groups=out_channels)
            self.pool = nn.Conv2d(out_channels, out_channels, kernel_size=args.kernel_size, 
                    padding=padding, stride=2, bias=True, groups=out_channels) #adding bias because no BN before
        elif args.noavg and args.nomax:
            # 1x1 stride 2
            self.conv = Conv(in_channels, out_channels, kernel_size=1, stride=2)
            self.pool = nn.Sequential()
        else:
            self.conv = Conv(in_channels, out_channels,
                             kernel_size=1, groups=args.group_1x1)
            self.pool = mixgb(args, padding=padding)
Example #22
def get_brats_nets(input_shape, filters_list, kernel_size_list, dense_size,
                   nlabels):
    inputs = Input(shape=input_shape)
    conv = inputs
    for filters, kernel_size in zip(filters_list, kernel_size_list):
        conv = Conv(filters,
                    kernel_size=(kernel_size, ) * 3,
                    activation='relu',
                    data_format='channels_first')(conv)

    full = Conv(dense_size,
                kernel_size=(1, 1, 1),
                data_format='channels_first',
                name='fc_dense',
                activation='relu')(conv)
    full_roi = Conv(nlabels[0],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_roi')(full)
    full_sub = Conv(nlabels[1],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_sub')(full)

    rf_roi = Concatenate(axis=1)([conv, full_roi])
    rf_sub = Concatenate(axis=1)([conv, full_sub])

    rf_num = 1
    while np.prod(rf_roi.shape[2:]) > 1:
        rf_roi = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_roi%d' % rf_num)(rf_roi)
        rf_sub = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_sub%d' % rf_num)(rf_sub)
        rf_num += 1

    full_roi = Reshape((nlabels[0], -1))(full_roi)
    full_sub = Reshape((nlabels[1], -1))(full_sub)
    full_roi = Permute((2, 1))(full_roi)
    full_sub = Permute((2, 1))(full_sub)
    full_roi_out = Activation('softmax', name='fc_roi_out')(full_roi)
    full_sub_out = Activation('softmax', name='fc_sub_out')(full_sub)

    combo_roi = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_roi)])
    combo_sub = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_sub)])

    tumor_roi = Dense(nlabels[0], activation='softmax',
                      name='tumor_roi')(combo_roi)
    tumor_sub = Dense(nlabels[1], activation='softmax',
                      name='tumor_sub')(combo_sub)

    outputs_roi = [tumor_roi, full_roi_out]

    net_roi = Model(inputs=inputs,
                    outputs=outputs_roi,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    outputs_sub = [tumor_sub, full_sub_out]

    net_sub = Model(inputs=inputs,
                    outputs=outputs_sub,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    return net_roi, net_sub
Example #23
def main():

    train = SurfaceNormalsDataset(args.dataPath + '/actual_training', test = False, normalize=True)
    trainloader = torch.utils.data.DataLoader(train, batch_size=args.batchSize, shuffle=True)

    valid = SurfaceNormalsDataset(args.dataPath + '/validation', test = False, normalize=True)


    # model = torch.nn.Sequential(
    #     Conv(3,10),
    #     Hourglass(4,10,bn=None,increase=64),
    #     Conv(10,10),
    #     Conv(10,3),
    #     )

    if args.model == "":
        model = torch.nn.Sequential(
            Conv(3,args.numFilters),
            Hourglass(args.hourglassSteps,args.numFilters,bn=None,increase=args.increase),
            Conv(args.numFilters,args.numFilters),
            Conv(args.numFilters,3, relu = True),
            )
    else:
        model = torch.load("../models/" + args.model)
    
    if args.gpu:
        torch.cuda.set_device(0)
        model = model.cuda()



    # criterion = torch.nn.MSELoss(size_average=False)
    criterion = MAELoss()
    # criterion = cosDist()

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learn)
    
    start = time.time()
    for epoch in range(args.numEpochs):  # loop over the dataset multiple times

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs = data['image']
            labels = data['normal']

            # wrap them in Variable
            if args.gpu:
                inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda()     
            else:
                inputs, labels = Variable(inputs), Variable(labels)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            #print(outputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()


            # print statistics
            running_loss += loss.item()
            if i % 10 == 9:    # print every 10 mini-batches
                # divisor = (10 * 3 * 128 * 128 * args.batchSize)  # per-element scaling, superseded below
                divisor = 10
                print('[%d, %5d] loss: %.8f' %
                      (epoch + 1, i + 1, running_loss / (divisor)))
                running_loss = 0.0
                print("Time taken: " + timeStr(time.time() - start))
                print("Time remaining: " + timeRemaining(start, time.time(), epoch, args.numEpochs, i, len(trainloader)))
                torch.save(model, '../models/' + args.outputName)

                valid_loss = get_validation_loss(model, valid, criterion, args)
                print('validation loss:', valid_loss.item(), '\n')
                sys.stdout.flush()

        torch.save(model, '../models/' + args.outputName)


    print('Finished Training')
Example #24
 def __init__(self,resgroups=1,expansion=6, num_levels=4, 
              down_depth = [1,2,2,2], up_depth = [1,1,1],
              filters=[16,24,32,48],endchannels=[16,1],groupings=(1,1),
              upkern=3,use_JPU=False,dilate_channels=32,bias_ll=False):
     super().__init__()
     self.useJPU = False  # use_JPU argument currently ignored
     self.fused = False
     
     assert num_levels >= 3 and num_levels <= 6
     assert len(filters) == num_levels
     assert len(down_depth) == num_levels
     assert len(up_depth) == num_levels-1
     
     
     self.num_levels = num_levels
     
     # drop to 1/2
     self.encoder0 = Sequential(Conv(3,filters[0],DWS=False,stride=2))
     for j in range(down_depth[0]):
         name = "DownIR_{}_{}".format(0,j)
         self.encoder0.add_module(name,InvertedResidual(filters[0],
                                                        filters[0],
                                                        expansion))
     self.decoder0 = Sequential(InvertedResidual(2*filters[0],filters[0],
                                                 expansion))
     for j in range(up_depth[0]):
         name = "UpIR_{}_{}".format(0,j)
         self.decoder0.add_module(name,InvertedResidual(filters[0],
                                                        filters[0],
                                                        expansion))
     if upkern==3:
         self.upconv0 = UpConvUS(filters[0],endchannels[0],upsample=2,DWS=True)
     else:
         self.upconv0 = UpConv(filters[0],endchannels[0],upsample=2,DWS=True)
         
     # drop to 1/4
     i = 1
     self.encoder1 = Sequential(InvertedResidual(filters[i-1],
                                                 filters[i],
                                                 expansion,
                                                 stride=2))
     for j in range(down_depth[i]):
         name = "DownIR_{}_{}".format(i,j)
         self.encoder1.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     self.decoder1 = Sequential(InvertedResidual(2*filters[i],
                                                 filters[i],
                                                 expansion))
     for j in range(up_depth[i]):
         name = "UpIR_{}_{}".format(i,j)
         self.decoder1.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     if upkern==3:
         self.upconv1 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
     else:
         self.upconv1 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     # drop to 1/8
     i = 2
     self.encoder2 = Sequential(InvertedResidual(filters[i-1],
                                                 filters[i],
                                                 expansion,
                                                 stride=2))
     for j in range(down_depth[i]):
         name = "DownIR_{}_{}".format(i,j)
         self.encoder2.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     if upkern==3:
         self.upconv2 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
     else:
         self.upconv2 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     if num_levels > 3:
         # note: decoders only need one fewer level
         self.decoder2 = Sequential(InvertedResidual(2*filters[i],
                                                 filters[i],
                                                 expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder2.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         
         # drop to 1/16
         i = 3
         self.encoder3 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder3.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv3 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv3 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
             
     if num_levels > 4:
         self.decoder3 = Sequential(InvertedResidual(2*filters[i],
                                                     filters[i],
                                                     expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder3.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         
         # drop to 1/32
         i = 4
         self.encoder4 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder4.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv4 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv4 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     if num_levels > 5:
         self.decoder4 = Sequential(InvertedResidual(2*filters[i],
                                                     filters[i],
                                                     expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder4.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
             
         # drop to 1/64
         i = 5
         self.encoder5 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder5.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv5 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv5 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
         
     self.pred = Conv(endchannels[0],endchannels[1],DWS=False,bias=bias_ll)
     self.edge = Conv(endchannels[0],endchannels[1],DWS=False,bias=bias_ll)
     
     self.quant = QuantStub()
     self.dequant = DeQuantStub()    
Example #25
#%% LSTM: WORKS
NN = FF([LSTMFullCon(Tanh, Sigmoid, Softmax, (4, 50, 4))], RNNCrossEntropy)
NN.SGD(train, eta=1e-3)
#%% RNN: WORKS
NN = FF([RecurrentFullCon(Tanh, Softmax, (4, 50, 4))], RNNCrossEntropy)
NN.SGD(train, eta=1e-4)
#%% Networks: WORKS
NNS = [
    FF([FullCon(Sigmoid, (28 * 28, 30)),
        FullCon(Sigmoid, (30, 10))], CrossEntropy) for i in range(3)
]
NN = FF([Networks(Sigmoid, NNS)], CrossEntropy)
NN.SGD(part_train)
#%% Conv+Pool+Dropout: WORKS
NN = FF([
    Conv(Identity, (25, 3)),
    Pool('mean', (3, -1), (2, 2)),
    Dropout('binomial', 0.9),
    FullCon(Sigmoid, (3 * 13 * 13, 10))
], CrossEntropy)
NN.SGD(part_train)
#%% FullCon Swish: WORKS
b = 10.
NN = FF([FullCon(Swish(b), (28 * 28, 30)),
         FullCon(Swish(b), (30, 10))], CrossEntropy)
NN.SGD(part_train)
#%% FullCon: WORKS
NN = FF([FullCon(Sigmoid, (28 * 28, 30)),
         FullCon(Sigmoid, (30, 10))], CrossEntropy)
NN.SGD(part_train)
#%% TO DO
Example #26
 def __init__(self, x_dim, y_dim):
     super(Merge, self).__init__()
     self.conv = Conv(x_dim, y_dim, 1, relu=False, bn=False)
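
Merge is the 1x1 projection used by merge_preds/merge_features in the stacked-hourglass examples above; a tiny sketch (assumes Conv applies its convolution in forward):

import torch

m = Merge(256, 256)                      # 1x1 conv, no BN or ReLU
y = m.conv(torch.randn(1, 256, 64, 64))  # shape preserved: (1, 256, 64, 64)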
Example #27
 def __init__(self,leak=0,
              norm_type='batch',
              DWS=True,DWSFL=False,
              outerK=3,resgroups=1,
              filters=[8,16,16],
              shuffle=False,
              blocks=[2,2,2,1,1],
              endgroups=(1,1),
              upkern=3,
              bias_ll=True,
              quant=False):
     super().__init__()
     self.fused = False
     self.leak = leak
     self.norm_type = norm_type
     
     # downward conv block (shrink to 1/4x1/4 image)
     self.down_conv = torch.nn.Sequential(
         Conv1stLayer(3, filters[0], outerK, 1, DWS=DWSFL, 
                      norm_type=norm_type, leak=leak),
         Conv(filters[0],filters[1], 3, 2, DWS=DWS,groups=endgroups[0],
              norm_type=norm_type, leak=leak),
         Conv(filters[1], filters[2], 3, 2, DWS=DWS,groups=endgroups[1],
              norm_type=norm_type, leak=leak)
     )
     
     # resblock - most effort is here
     if shuffle:
         self.res_block = torch.nn.Sequential()
         i=0
         for block in blocks:
             self.res_block.add_module(str(i),ResShuffleLayer(filters[2],
                                                       leak=leak,
                                                       norm_type=norm_type,
                                                       DWS=DWS,
                                                       groups=resgroups,
                                                       dilation=block))
             i += 1
     else:
         self.res_block = torch.nn.Sequential()
         i=0
         for block in blocks:
             self.res_block.add_module(str(i),ResLayer(filters[2],
                                                       leak=leak,
                                                       norm_type=norm_type,
                                                       DWS=DWS,
                                                       dilation=block,
                                                       groups=resgroups))
             i += 1
             
     # up conv block (grow to original size)
     if upkern == 4:
         self.up_conv = torch.nn.Sequential(
             UpConv(filters[2], filters[1], 4, 2, DWS=DWS,groups=endgroups[1],
                    norm_type=norm_type, leak=leak),
             UpConv(filters[1], filters[0], 4, 2, DWS=DWS,groups=endgroups[0],
                    norm_type=norm_type, leak=leak),
             Conv(filters[0], 3, outerK, 1, DWS=DWSFL, bias=bias_ll)
         )
     if upkern == 3:
         self.up_conv = torch.nn.Sequential(
             UpConvUS(filters[2], filters[1], 3, 2, DWS=DWS,groups=endgroups[1],
                    norm_type=norm_type, leak=leak),
             UpConvUS(filters[1], filters[0], 3, 2, DWS=DWS,groups=endgroups[0],
                    norm_type=norm_type, leak=leak),
             Conv(filters[0], 3, outerK, 1, DWS=DWSFL, bias=bias_ll)
         )  
     
     if quant:
         self.quant = QuantStub()
         self.dequant = DeQuantStub()
         self.transformer = torch.nn.Sequential(self.quant,
                                                self.down_conv,
                                                self.res_block,
                                                self.up_conv,
                                                self.dequant)
     else:
         self.transformer = torch.nn.Sequential(self.down_conv,
                                                self.res_block,
                                                self.up_conv)
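
The constructor wires everything into self.transformer, so the forward pass is presumably a single pass through it; a sketch of the assumed method:

 def forward(self, x):
     return self.transformer(x)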