Example #1
    def test_misc(self):  # tests of non-main features to improve coverage
        for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
            cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
            reset_seeds()

            optimizer_kw = {
                'total_iterations': 0,
                'decay': 1e-3,
                'amsgrad': optimizer_name == 'AdamW',
                'nesterov': optimizer_name == 'SGDW'
            }
            num_batches = 4
            batch_size, timesteps = 16, 8
            batch_shape = (batch_size, timesteps)
            embed_input_dim = 5
            total_iterations = 0

            self.model = self._make_model(batch_shape,
                                          total_iterations,
                                          embed_input_dim=embed_input_dim,
                                          dense_constraint=1,
                                          l2_reg=1e-4,
                                          bidirectional=False,
                                          sparse=True)
            optimizer = self._make_optimizer(optimizer_name, self.model,
                                             **optimizer_kw)
            self.model.compile(optimizer,
                               loss='sparse_categorical_crossentropy')
            X, Y = self._make_data(num_batches,
                                   *batch_shape,
                                   embed_input_dim=embed_input_dim,
                                   sparse=True)

            for batch_num in range(num_batches):
                self.model.train_on_batch(X[batch_num], Y[batch_num])

            self._test_save_load(self.model, X, optimizer_name, optimizer)

            # util coverage: exercise fill_dict_in_order and AdamW constructor paths
            dc = {'lstm': 0, 'dense': 0}
            fill_dict_in_order(dc, [1e-4, 2e-4])
            AdamW(model=self.model, zero_penalties=True)
            AdamW(model=self.model, weight_decays={'a': 0})

            # cleanup
            del self.model, optimizer
            reset_seeds(reset_graph_with_backend=K)

            cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name),
                   'green')
        cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
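The util-coverage block above calls fill_dict_in_order from keras_adamw.utils without showing it. A minimal sketch of its assumed behavior, inferred from the call site (the real implementation may differ):

def fill_dict_in_order(_dict, values):
    # Assumed behavior: assign `values` to the dict's keys in insertion
    # order, so {'lstm': 0, 'dense': 0} with [1e-4, 2e-4] becomes
    # {'lstm': 1e-4, 'dense': 2e-4}.
    for key, value in zip(_dict, values):
        _dict[key] = value
    return _dict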
Example #2
def aunet(pretrained_weights=None, input_size=(256, 256, 1)):

    inputs = Input(input_size)

    added1 = resblock(inputs, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(added1)

    added2 = resblock(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(added2)
    
    added3 = resblock(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(added3)

    added4 = resblock(pool3, 512)
    pool4 = MaxPooling2D(pool_size=(2, 2))(added4)

    added5 = resblock(pool4, 1024)

    merge6 = concat(added4, added5, 512)
    added6 = Basic(merge6, 512)

    merge7 = concat(added3, added6, 256)
    added7 = Basic(merge7, 256)

    merge8 = concat(added2, added7, 128)
    added8 = Basic(merge8, 128)

    merge9 = concat(added1, added8, 64)
    added9 = Basic(merge9, 64)

    conv10 = Conv2D(1, 1, activation='sigmoid')(added9)
    model = Model(inputs=inputs, outputs=conv10)  # Keras 2 API: inputs/outputs
    # dl = balanced_cross_entropy(0.8)
    dl = combined_loss()
    opt = AdamW(lr=1e-3, model=model, use_cosine_annealing=True,
                total_iterations=24)
    # opt = keras.optimizers.Adam(lr=0.0001, amsgrad=True)
    model.compile(optimizer=opt, loss=dl, metrics=['accuracy', dice_score])

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
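resblock, concat, Basic, combined_loss and dice_score are project helpers not shown on this page. A plausible sketch of the three block helpers, assuming a standard residual U-Net layout (layer choices are guesses, not the project's code):

from keras.layers import Activation, Conv2D, UpSampling2D, add, concatenate

def resblock(x, filters):
    # Assumed residual block: two 3x3 convs with a 1x1-projected skip.
    shortcut = Conv2D(filters, 1, padding='same')(x)
    y = Conv2D(filters, 3, activation='relu', padding='same')(x)
    y = Conv2D(filters, 3, padding='same')(y)
    return Activation('relu')(add([y, shortcut]))

def concat(skip, x, filters):
    # Assumed decoder merge: 2x-upsample the deeper map, project it to
    # `filters` channels, then concatenate with the encoder skip.
    up = Conv2D(filters, 2, activation='relu', padding='same')(
        UpSampling2D(size=(2, 2))(x))
    return concatenate([skip, up], axis=3)

def Basic(x, filters):
    # Assumed plain double-conv block on the decoder path.
    y = Conv2D(filters, 3, activation='relu', padding='same')(x)
    return Conv2D(filters, 3, activation='relu', padding='same')(y)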
Example #3
def test_misc():  # tests of non-main features to improve coverage
    for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
        cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
        reset_seeds()

        optimizer_kw = {
            'total_iterations': 0,
            'decay': 1e-3,
            'amsgrad': optimizer_name == 'AdamW',
            'nesterov': optimizer_name == 'SGDW'
        }
        num_batches = 4
        batch_size, timesteps = 16, 8
        batch_shape = (batch_size, timesteps)
        embed_input_dim = 5

        # arbitrarily select SGDW for coverage testing
        l1_reg = 1e-4 if optimizer_name == 'SGDW' else 0
        l2_reg = 1e-4 if optimizer_name != 'SGDW' else 0
        if optimizer_name == 'SGDW':
            optimizer_kw.update(
                dict(zero_penalties=False,
                     weight_decays={},
                     total_iterations=2,
                     momentum=0))

        model = _make_model(batch_shape,
                            embed_input_dim=embed_input_dim,
                            dense_constraint=1,
                            l1_reg=l1_reg,
                            l2_reg=l2_reg,
                            bidirectional=False,
                            sparse=True)
        optimizer = _make_optimizer(optimizer_name, model, **optimizer_kw)
        model.compile(optimizer, loss='sparse_categorical_crossentropy')
        X, Y = _make_data(num_batches,
                          *batch_shape,
                          embed_input_dim=embed_input_dim,
                          sparse=True)

        for batch_num in range(num_batches):
            model.train_on_batch(X[batch_num], Y[batch_num])

        _test_save_load(model, X, optimizer_name, optimizer)

        # util coverage: exercise fill_dict_in_order and AdamW constructor paths
        dc = {'lstm': 0, 'dense': 0}
        fill_dict_in_order(dc, [1e-4, 2e-4])
        AdamW(model=model, zero_penalties=False, total_iterations=2)
        AdamW(model=model, weight_decays={'a': 0})

        opt = AdamW(weight_decays={model.layers[1].weights[0].name: (0, 0)},
                    total_iterations=2)
        model.compile(opt, loss='sparse_categorical_crossentropy')
        model.train_on_batch(X[0], Y[0])

        # cleanup
        del model, optimizer
        reset_seeds(reset_graph_with_backend=K)
        try:
            K_eval('x', K)  # invalid input, expected to raise; for coverage
        except Exception:
            pass

        cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
    cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
Example #4
def deepSupervision(pretrained_weights=None, input_size=(256, 256, 3)):

    inputs = Input(input_size)
    #Encoder 1
    added1 = encoder(inputs, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(added1)

    #Encoder 2
    added2 = encoder(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(added2)
    att1 = attentionblock(added2, (2, 2))

    #Encoder 3
    added3 = encoder(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(added3)
    att2 = attentionblock(added3, (4, 4))

    #Encoder 4
    added4 = encoder(pool3, 512)
    pool4 = MaxPooling2D(pool_size=(2, 2))(added4)
    att3 = attentionblock(added4, (8, 8))

    #Encoder 5
    added5 = encoder(pool4, 1024)

    #Decoder 1
    merge6 = decoder(added4, added5, 512)
    added6 = Dropout(0.5)(merge6)
    #added6 = merge6
    att4 = attentionblock(added6, (8, 8))

    #Decoder 2
    merge7 = decoder(added3, added6, 256)
    added7 = Dropout(0.5)(merge7)
    #added7 = merge7
    att5 = attentionblock(added7, (4, 4))

    #Decoder 3
    merge8 = decoder(added2, added7, 128)
    added8 = Dropout(0.5)(merge8)
    #added8 = merge8
    att6 = attentionblock(added8, (2, 2))

    #Decoder 4
    added9 = decoder(added1, added8, 64)
    added9 = Dropout(0.5)(added9)

    added9 = concatenate([added9, att1, att2, att3, att4, att5, att6], axis=3)
    added10 = Conv2D(1, 1, activation='sigmoid', padding='same')(added9)

    model = Model(inputs=inputs, outputs=added10)  # Keras 2 API: inputs/outputs
    dl = composite_loss(att1, att2, att3, att4, att5, att6, added9)
    opt = AdamW(lr=1e-3,
                model=model,
                use_cosine_annealing=True,
                total_iterations=24)
    model.compile(optimizer=opt, loss=dl, metrics=['accuracy', dice_score])
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
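encoder, decoder, attentionblock, composite_loss and dice_score are likewise defined elsewhere in the project. The upsampling factors passed to attentionblock bring each feature map back to the 256x256 input resolution for the final concatenate, so a plausible (hypothetical) sketch is a one-channel side output plus upsampling:

from keras.layers import Conv2D, UpSampling2D

def attentionblock(x, size):
    # Hypothetical deep-supervision side output: collapse the feature map
    # to one sigmoid channel, then upsample by `size` so it matches the
    # 256x256 input resolution before the final concatenate.
    side = Conv2D(1, 1, activation='sigmoid', padding='same')(x)
    return UpSampling2D(size=size)(side)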
Example #5
#%%############################################################################
import numpy as np
import keras.backend as K
from keras.layers import Input, LSTM, Dense
from keras.models import Model
from keras.regularizers import l1, l2, l1_l2
from keras_adamw import AdamW
from keras_adamw.utils import K_eval

ipt = Input(shape=(120, 4))
x = LSTM(60,
         activation='relu',
         name='lstm_1',
         kernel_regularizer=l1(1e-4),
         recurrent_regularizer=l2(2e-4))(ipt)
out = Dense(1, activation='sigmoid', kernel_regularizer=l1_l2(1e-4, 2e-4))(x)
model = Model(ipt, out)

lr_multipliers = {'lstm_1': 0.5}

optimizer = AdamW(lr=1e-4,
                  model=model,
                  lr_multipliers=lr_multipliers,
                  use_cosine_annealing=True,
                  total_iterations=24)
model.compile(optimizer, loss='binary_crossentropy')

#%%############################################################################
eta_history = []
lr_history = []
for epoch in range(3):
    for iteration in range(24):
        x = np.random.rand(10, 120, 4)  # dummy data
        y = np.random.randint(0, 2, (10, 1))  # dummy labels
        loss = model.train_on_batch(x, y)
        eta_history.append(K_eval(model.optimizer.eta_t, K))  # annealing multiplier
        lr_history.append(K_eval(model.optimizer.lr_t, K))  # effective learning rate
        print("Iter {} loss: {}".format(iteration + 1, "%.3f" % loss))