Example #1
    def __init__(self, config):
        BaseModel.__init__(self, config)
        # For bigger models, we want to use "bottleneck" layers
        try:
            if self.config.resnet_size < 50:
                bottleneck = False
            else:
                bottleneck = True
        except AttributeError:
            # sizes allowed: dict_keys([18, 34, 50, 101, 152, 200])
            print('WARN: resnet_size not specified, using 101')
            self.config.resnet_size = 101
            bottleneck = True
        self.model = Model(resnet_size=self.config.resnet_size,
                           bottleneck=bottleneck,
                           num_classes=28,
                           num_filters=64,
                           kernel_size=7,
                           conv_stride=2,
                           first_pool_size=3,
                           first_pool_stride=2,
                           block_sizes=_get_block_sizes(
                               self.config.resnet_size),
                           block_strides=[1, 2, 2, 2],
                           resnet_version=2,
                           data_format=None,
                           dtype=tf.float32)

        self.build_model()
        self.init_saver()
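The helper _get_block_sizes is not shown above; a minimal sketch, assuming it maps the allowed depths from the comment to the standard per-stage ResNet block counts:

def _get_block_sizes(resnet_size):
    """Hypothetical helper: standard block counts per ResNet stage."""
    choices = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    try:
        return choices[resnet_size]
    except KeyError:
        raise ValueError('resnet_size must be one of %s, got %d'
                         % (list(choices.keys()), resnet_size))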
Example #2
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        # d = 56  # out channels of first layer
        # s = 32  # out channels of hidden layer
        # self.config.m = 16  # number of layers in the hidden block

        # Alternative: self.upsample = nn.Upsample(scale_factor=self.config.scale_factor, mode='nearest')
        self.upsample = nn.Sequential(*[
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.scale_factor, kernel_size=5, stride=1, padding=2, bias=False),
            SRPUpsampleBlock(scale=self.config.scale_factor),
            nn.LeakyReLU(0.2)
        ])
        # For scale factors of 100 or 1000, this conv + SRPUpsampleBlock +
        # LeakyReLU stage can instead be cascaded two or three times with
        # scale=10 (out_channels=10) per stage.

        # Feature extraction
        # if the last layer is a conv, use padding of 2
        self.first_part = ConvBlock(self.config.num_channels, self.config.d, self.config.k, 1, 0, activation='lrelu', norm=None)

        self.layers = []
        # Shrinking
        self.layers.append(ConvBlock(self.config.d, self.config.s, 1, 1, 0, activation='lrelu', norm=None))
        # Non-linear Mapping
        for _ in range(int(self.config.m)):
            self.layers.append(ResnetBlock(self.config.s, 3, 1, 1, activation='lrelu', norm='instance'))
        self.layers.append(nn.PReLU())
        # Expanding
        self.layers.append(ConvBlock(self.config.s, self.config.d, 1, 1, 0, activation='lrelu', norm=None))

        self.mid_part = torch.nn.Sequential(*self.layers)

        # Deconvolution
        self.last_part = nn.ConvTranspose1d(int(self.config.d), int(self.config.num_channels), int(self.config.k), 1, 0, output_padding=0)
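SRPUpsampleBlock is defined elsewhere; a minimal sketch of a 1D sub-pixel (pixel-shuffle) upsampler, assuming it folds channels into length so (B, C*r, L) becomes (B, C, L*r):

import torch
import torch.nn as nn

class SRPUpsampleBlock(nn.Module):
    """Hypothetical 1D sub-pixel shuffle: (B, C * r, L) -> (B, C, L * r)."""
    def __init__(self, scale):
        super(SRPUpsampleBlock, self).__init__()
        self.scale = scale

    def forward(self, x):
        b, c, l = x.size()
        r = self.scale
        x = x.view(b, c // r, r, l)              # split channels into r groups
        x = x.permute(0, 1, 3, 2).contiguous()   # interleave along length
        return x.view(b, c // r, l * r)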
Example #3
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        d = 56 # out channels of first layer
        s = 12 # out channels of hidden layer
        m = 4 # number of layers in the hidden block

        # Feature extraction
        self.first_part = ConvBlock(self.config.num_channels, d, 5, 1, 0, activation='prelu', norm=None)

        self.layers = []
        # Shrinking
        self.layers.append(ConvBlock(d, s, 1, 1, 0, activation='prelu', norm=None))
        # Non-linear Mapping
        for _ in range(m):
            self.layers.append(ResnetBlock(s, 3, 1, 1, activation='prelu', norm='batch'))
        self.layers.append(nn.PReLU())
        # Expanding
        self.layers.append(ConvBlock(s, d, 1, 1, 0, activation='prelu', norm=None))

        self.mid_part = torch.nn.Sequential(*self.layers)

        # Deconvolution
        self.last_part = nn.ConvTranspose1d(d, self.config.num_channels, 25, self.config.scale_factor, 0, output_padding=0)
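For reference, the deconvolution output length is L_out = (L_in - 1) * stride - 2 * padding + kernel_size + output_padding, so with kernel 25, stride scale_factor, and zero padding the output overshoots L_in * scale_factor by 25 - scale_factor samples; a quick check with illustrative numbers:

# Illustrative sanity check of the ConvTranspose1d output length.
L_in, k, s = 100, 25, 4              # assuming scale_factor == 4
L_out = (L_in - 1) * s + k           # padding == output_padding == 0
print(L_out)                         # 421 == 100 * 4 + (25 - 4)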
Example #4
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        base_filter = 64
        num_residuals = 18

        self.convtransposed = nn.ConvTranspose1d(self.config.num_channels, self.config.num_channels, 10, self.config.scale_factor, 0, output_padding=0)
        self.input_conv = ConvBlock(self.config.num_channels, base_filter, 3, 1, 1, norm=None, bias=False)

        conv_blocks = []
        for _ in range(num_residuals):
            conv_blocks.append(ConvBlock(base_filter, base_filter, 3, 1, 1, norm=None, bias=False))
        self.residual_layers = nn.Sequential(*conv_blocks)

        self.output_conv = ConvBlock(base_filter, self.config.num_channels, 3, 1, 1, activation=None, norm=None, bias=False)
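The forward pass is not shown; a hypothetical wiring for this VDSR-style layout, assuming a global residual connection around the conv stack (inferred from the layer names, not confirmed by the source):

    def forward(self, x):
        # Hypothetical forward for the layers defined above.
        x = self.convtransposed(x)       # upsample to the target length
        residual = x
        out = self.input_conv(x)
        out = self.residual_layers(out)
        out = self.output_conv(out)
        return out + residual            # global skip connection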
Example #5
    def __init__(self, network_params, act_out=tf.nn.softplus, lambda_od=0.001, lambda_factor=0.001, dip_type=const.dipi,
                 transfer_fct= tf.nn.relu, learning_rate=1e-5,
                 kinit=tf.contrib.layers.xavier_initializer(), batch_size=32,
                 dropout=0.2, batch_norm=True, epochs=200, checkpoint_dir='',
                 summary_dir='', result_dir='', restore=False, plot=False, clustering=False, colab=False, model_type=const.VAE):

        BaseModel.__init__(self, checkpoint_dir, summary_dir, result_dir)
        self.summary_dir = summary_dir
        self.result_dir = result_dir
        self.batch_size = batch_size
        self.dropout = dropout
        self.lambda_od = lambda_od
        #self.lambda_factor = lambda_factor
        self.dip_type = dip_type

        self.lambda_d = self.lambda_od * lambda_factor

        self.epochs = epochs
        self.w_file = result_dir + '/w_file'
        self.restore = restore
        self.plot = plot
        self.colab = colab
        self.clustering = clustering

        if self.plot:
            self.w_space_files = list()
            self.w_space3d_files = list()
            self.recons_files = list()

        # Creating computational graph for train and test
        self.graph = tf.Graph()
        with self.graph.as_default():
            if model_type == const.DIPVAE:
                self.model_graph = DIPVAEGraph(network_params=network_params, act_out=act_out,
                                               lambda_od=self.lambda_od, lambda_d=self.lambda_d,
                                               transfer_fct=transfer_fct, learning_rate=learning_rate, kinit=kinit,
                                               batch_size=batch_size, reuse=False)
            elif model_type == const.DIPVAECNN:
                self.model_graph = DIPVAECNNGraph(network_params=network_params, act_out=act_out,
                                                  lambda_od=self.lambda_od, lambda_d=self.lambda_d,
                                                  transfer_fct=transfer_fct, learning_rate=learning_rate, kinit=kinit,
                                                  batch_size=batch_size, reuse=False)

            self.model_graph.build_graph()
            self.trainable_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
Example #6
    def __init__(self, config):
        BaseModel.__init__(self, config)
        # self.config = config
        self.inputs = []
        if self.config.IS_TRAIN:
            self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
            self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
            self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')
            self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
            self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
            self.layers = {'data': self.data, 'im_info': self.im_info,
                           'gt_boxes': self.gt_boxes, 'gt_ishard': self.gt_ishard,
                           'dontcare_areas': self.dontcare_areas}
        else:
            self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
            self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
            self.layers = {'data': self.data, 'im_info': self.im_info}
            self.connector = TextProposalConnector()
        self.keep_prob = tf.placeholder(tf.float32)
        self.setup()
        self.init_saver()
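A minimal sketch of feeding these placeholders at train time; sess, train_op, and the numpy arrays are assumptions, with shapes following the placeholder definitions above:

feed = {
    net.data: images,              # (N, H, W, 3) float32 batch
    net.im_info: im_info,          # (N, 3): height, width, scale
    net.gt_boxes: gt_boxes,        # (M, 5): x1, y1, x2, y2, class
    net.gt_ishard: gt_ishard,      # (M,) int32 hard-example flags
    net.dontcare_areas: dontcare,  # (K, 4) regions to ignore
    net.keep_prob: 0.5,            # dropout keep probability
}
sess.run(train_op, feed_dict=feed)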
Example #7
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        self.entry = nn.Conv1d(1, 64, 3, 1, 1)

        self.b1 = Block(64, 64)
        self.b2 = Block(64, 64)
        self.b3 = Block(64, 64)
        self.c1 = ConvBlock(64 * 2, 64, 1, 1, 0, activation='prelu', norm=None)
        self.c2 = ConvBlock(64 * 3, 64, 1, 1, 0, activation='prelu', norm=None)
        self.c3 = ConvBlock(64 * 4, 64, 1, 1, 0, activation='prelu', norm=None)

        self.upsample = nn.ConvTranspose1d(64,
                                           64,
                                           10,
                                           self.config.scale_factor,
                                           0,
                                           output_padding=0)

        self.exit = ConvBlock(64, 1, 3, 1, 1, activation=None, norm=None)
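The forward pass is not shown; a sketch of the CARN-style cascade these layer widths suggest, where each block's output is concatenated with all earlier features and fused by the matching 1x1 ConvBlock (hypothetical):

    def forward(self, x):
        # Hypothetical forward: channel counts match c1/c2/c3 above.
        x = self.entry(x)
        c0 = o0 = x
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)   # 64 * 2 channels into self.c1
        o1 = self.c1(c1)
        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)   # 64 * 3 channels into self.c2
        o2 = self.c2(c2)
        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)   # 64 * 4 channels into self.c3
        o3 = self.c3(c3)
        return self.exit(self.upsample(o3))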
Example #8
    def __init__(self, config):
        BaseModel.__init__(self, config)
        self.build_model()
        self.init_saver()
Example #9
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        self.upsample = nn.Upsample(scale_factor=(1, self.config.scale_factor),
                                    mode='bicubic')
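Note that mode='bicubic' interpolates over two dimensions, so the 1D signal must carry a dummy height axis; an illustrative shape check (names assumed):

x = torch.randn(8, 1, 1, 1024)   # (B, C, 1, L)
y = net.upsample(x)              # (B, C, 1, L * scale_factor)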
Example #10
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        L = 4
        n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
        n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
        self.downsampling_l = []
        # ModuleList (rather than a plain list) so the parameters register.
        self.down_layers = nn.ModuleList()

        # downsampling layers
        for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
            self.temp_layers = []
            self.temp_layers.append(
                nn.Conv1d(in_channels=self.config.num_channels,
                          out_channels=nf,
                          kernel_size=fs,
                          stride=2,
                          padding=fs // 2,  # pad by half the kernel size
                          bias=False))
            # if l > 0: x = BatchNormalization(mode=2)(x)
            self.temp_layers.append(nn.LeakyReLU(0.2))
            self.down_layers.append(torch.nn.Sequential(*self.temp_layers))

        # bottleneck layer
        self.bottleneck = []
        self.bottleneck.append(
            nn.Conv1d(in_channels=n_filters[-1],
                      out_channels=n_filters[-1],
                      kernel_size=n_filtersizes[-1],
                      stride=2,
                      padding=n_filtersizes[-1] // 2,  # pad by half the kernel size
                      bias=False))
        self.bottleneck.append(nn.Dropout(p=0.5))
        # x = BatchNormalization(mode=2)(x)
        self.bottleneck.append(nn.LeakyReLU(0.2))
        self.mid_layer = torch.nn.Sequential(*self.bottleneck)

        # upsampling layers
        self.up_layers = nn.ModuleList()
        # reversed() needs a concrete sequence in Python 3, so materialize the
        # zip; the skip inputs (self.downsampling_l) are consumed in forward().
        for l, nf, fs in reversed(list(zip(range(L), n_filters, n_filtersizes))):
            # (-1, n/2, 2f)
            self.temp_layers = []
            self.temp_layers.append(
                nn.Conv1d(in_channels=n_filters[-1],
                          out_channels=2 * nf,
                          kernel_size=fs,
                          padding=fs // 2,
                          bias=False))
            # x = BatchNormalization(mode=2)(x)
            self.temp_layers.append(nn.Dropout(p=0.5))
            self.temp_layers.append(nn.ReLU())
            # (-1, n, f)
            # NB: nn.PixelShuffle expects 4D input; a 1D shuffle (cf. the
            # SRPUpsampleBlock sketch in Example #2) is what is needed here.
            self.temp_layers.append(nn.PixelShuffle(2))
            # (-1, n, 2f)
            self.up_layers.append(torch.nn.Sequential(*self.temp_layers))

        self.last_layer = []
        self.last_layer.append(
            nn.Conv1d(in_channels=1,
                      out_channels=2,
                      kernel_size=9,
                      padding=9 // 2,
                      bias=False))
        self.last_layer.append(nn.PixelShuffle(2))
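The forward pass is not shown; a rough sketch of the U-Net wiring the names suggest, stashing downsampling activations for skip connections (hypothetical, and the concatenated channel counts would need the constructor's in_channels adjusted accordingly):

    def forward(self, x):
        skips = []
        out = x
        for down in self.down_layers:
            out = down(out)
            skips.append(out)                    # stash for the upsampling path
        out = self.mid_layer(out)
        for up, skip in zip(self.up_layers, reversed(skips)):
            out = up(out)
            out = torch.cat([out, skip], dim=1)  # channel-wise skip
        return out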
Example #11
    def __init__(self, config):
        super(Net, self).__init__()  # super() only initializes the first parent class
        BaseModel.__init__(self, config)

        self.upsample = nn.Upsample(scale_factor=self.config.scale_factor, mode='linear')
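mode='linear' operates on 3D input; a one-line shape check (names assumed):

x = torch.randn(8, 1, 1024)   # (B, C, L)
y = net.upsample(x)           # (B, C, L * scale_factor)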