예제 #1
0
def BinaryMobilenetV2():
  """Build a binarized MobileNetV2-style Sequential model for 32x32x3 inputs.

  Relies on module-level hyperparameters (kernel_size, H,
  kernel_lr_multiplier, use_bias, classes, binary_tanh) and on
  InvertedResidualBlock(), which appends bottleneck stages to the
  Sequential model and returns it.

  Returns:
      The assembled Sequential model; the head is BinaryDense logits
      followed by BatchNormalization (no softmax).
  """
  model = Sequential() 
  # Stem: strided binary conv downsamples 32x32 -> 16x16.
  model.add(BinaryConv2D(32, kernel_size=kernel_size, strides=(2, 2), H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_last',
                     padding='same', use_bias=use_bias,input_shape=(32, 32, 3)))
  # Fixed: data is channels_last, so batch-norm must normalize the last
  # (channel) axis; the original axis=1 normalized a spatial axis.
  model.add(BatchNormalization(axis=-1))
  model.add(Activation(binary_tanh))
  # Inverted-residual stages: (filters, expansion t, stride, repeats n),
  # following the MobileNetV2 layout.
  model = InvertedResidualBlock(model, 16, (3, 3), t=1, strides=1, n=1)
  model = InvertedResidualBlock(model, 24, (3, 3), t=6, strides=2, n=2)
  model = InvertedResidualBlock(model, 32, (3, 3), t=6, strides=2, n=3)
  model = InvertedResidualBlock(model, 64, (3, 3), t=6, strides=2, n=4)
  model = InvertedResidualBlock(model, 96, (3, 3), t=6, strides=1, n=3)
  model = InvertedResidualBlock(model, 160, (3, 3), t=6, strides=2, n=3)
  model = InvertedResidualBlock(model, 320, (3, 3), t=6, strides=1, n=1)

  # 1x1 binary conv expands to 1280 channels before the classifier head.
  model.add(BinaryConv2D(1280, H=H,kernel_size=(1,1), kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias,strides=(1, 1)))
  model.add(BatchNormalization(axis=-1))
  model.add(Activation(binary_tanh))

  model.add(Flatten()) 
  # Classifier: logits only; the trailing BN replaces a bias/scale as is
  # common in BinaryNet-style models.
  model.add(BinaryDense(classes, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
  model.add(BatchNormalization(axis=-1))

  return model
예제 #2
0
def addBottleneck(model_input, filters, kernel, t, s):
  """Append one inverted-residual bottleneck stage to a Sequential model.

  Args:
      model_input: Sequential model to extend (mutated in place).
      filters: output channels of the depthwise-separable stage.
      kernel: kernel size for the separable convolution.
      t: channel expansion factor for the 1x1 expansion conv.
      s: stride of the separable convolution.

  Returns:
      The same Sequential model with the bottleneck layers appended.
  """
  model = model_input
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  # Expansion width = current channel count * t, read off the model output.
  tchannel = K.int_shape(model_input.layers[-1].output)[channel_axis] *t
  model.add(BinaryConv2D(tchannel, kernel_size=(1,1), H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_last',
                     padding='same', use_bias=use_bias))
  # Fixed: normalize over the channel axis computed above; the original
  # hard-coded axis=1 even though the data here is channels_last.
  model.add(BatchNormalization(axis=channel_axis))
  model.add(Activation(binary_tanh))

  # Fixed: use the `kernel` argument; the original ignored it and used the
  # module-level kernel_size instead.
  model.add(SeparableBinaryConv2D(filters,kernel_size=kernel,strides=(s,s), H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_last',
                     padding='same', use_bias=use_bias))
  model.add(BatchNormalization(axis=channel_axis))
  # Linear bottleneck: intentionally no activation after the projection.
  #model.add(Activation(binary_tanh))

  return model
예제 #3
0
def _td_conv_pair(x, filters, dropout_rate):
    """Two TimeDistributed 2x2 'elu' convolutions with dropout in between.

    Mirrors the repeated encoder/decoder motif of unet(); layer creation
    order (and therefore auto-generated layer names) matches the original
    inline code.
    """
    x = TimeDistributed(
        Conv2D(filters, (2, 2),
               activation='elu',
               kernel_initializer='he_normal',
               padding='same'))(x)
    x = Dropout(dropout_rate)(x)
    x = TimeDistributed(
        Conv2D(filters, (2, 2),
               activation='elu',
               kernel_initializer='he_normal',
               padding='same'))(x)
    return x


def unet(pretrained_weights=None,
         batch_size=2,
         input_shape=data_shape,
         input_size=(30, 32, 32, 1)):
    """Build and compile a time-distributed U-Net behind a binary-conv front end.

    The front end applies a per-frame 32x32 binary convolution, a streak
    transform, and temporal integration; a Dense layer then lifts the
    integrated image back to 30 frames which a U-Net refines.

    Args:
        pretrained_weights: accepted for API compatibility; not used here
            (NOTE(review): presumably loaded by the caller — confirm).
        batch_size: fixed batch size baked into the Input layer.
        input_shape: shape of the network input (default: module-level
            data_shape).
        input_size: input_shape hint passed to the inner BinaryConv2D.

    Returns:
        The compiled Keras Model (MSE loss, Adam optimizer).
    """
    # Fixed: the original ignored the `input_shape` parameter and always
    # used the module-level `data_shape`; the default preserves behavior.
    inputs = Input(shape=input_shape, batch_size=batch_size)
    bin_conv1 = TimeDistributed(
        BinaryConv2D(1,
                     kernel_size=(32, 32),
                     input_shape=input_size,
                     data_format='channels_last',
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     padding='same',
                     use_bias=use_bias,
                     name='bin_conv_1'))(inputs)
    s = Lambda(streak, output_shape=streak_output_shape)(bin_conv1)
    i = Lambda(integrate_ims, output_shape=integrate_ims_output_shape)(s)
    f = Flatten()(i)
    # 30720 = 30 * 32 * 32: one value per reconstructed voxel.
    dense1 = Dense(30720, activation='relu')(f)
    resh = Reshape((30, 32, 32, 1))(dense1)

    # Encoder: conv pairs with 2x2 max-pooling between levels.
    c1 = _td_conv_pair(resh, 16, 0.1)
    p1 = TimeDistributed(MaxPooling2D((2, 2)))(c1)

    c2 = _td_conv_pair(p1, 32, 0.1)
    p2 = TimeDistributed(MaxPooling2D((2, 2)))(c2)

    c3 = _td_conv_pair(p2, 64, 0.2)
    p3 = TimeDistributed(MaxPooling2D((2, 2)))(c3)

    c4 = _td_conv_pair(p3, 128, 0.2)
    p4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(c4)

    # Bottleneck.
    c5 = _td_conv_pair(p4, 256, 0.3)

    # Decoder: transpose convs upsample; skip connections concatenate the
    # matching encoder features.
    u6 = TimeDistributed(
        Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same'))(c5)
    u6 = concatenate([u6, c4])
    c6 = _td_conv_pair(u6, 128, 0.2)

    u7 = TimeDistributed(
        Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same'))(c6)
    u7 = concatenate([u7, c3])
    c7 = _td_conv_pair(u7, 64, 0.2)

    u8 = TimeDistributed(
        Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same'))(c7)
    u8 = concatenate([u8, c2])
    c8 = _td_conv_pair(u8, 32, 0.1)

    u9 = TimeDistributed(
        Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same'))(c8)
    # As in the original, the top level has no skip connection (c1 unused).
    c9 = _td_conv_pair(u9, 16, 0.1)

    outputs = TimeDistributed(Conv2D(1, (1, 1), activation='sigmoid'))(c9)

    model = Model(inputs=[inputs], outputs=[outputs])

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='mean_squared_error',
                  metrics=['accuracy'])

    return model
예제 #4
0
def BinaryVGG16():
  """Build a binarized VGG16 for 32x32x3 inputs.

  Thirteen BinaryConv2D layers (VGG16 layout) followed by two 4096-unit
  BinaryDense layers and a `classes`-way logits layer. Layer names
  (conv1..conv13, pool*, bn1..bn16, act1..act15, dense1..dense3) match the
  original hand-written version exactly, as does layer order.
  """
  model = Sequential()

  # (filters, pool_after) for conv1..conv13 in VGG16 order; pooling
  # follows conv2, conv4, conv7, conv10 and conv13.
  conv_cfg = [(64, False), (64, True),
              (128, False), (128, True),
              (256, False), (256, False), (256, True),
              (512, False), (512, False), (512, True),
              (512, False), (512, False), (512, True)]
  for idx, (filters, pool_after) in enumerate(conv_cfg, start=1):
    conv_kwargs = dict(kernel_size=kernel_size, H=H,
                       kernel_lr_multiplier=kernel_lr_multiplier,
                       data_format='channels_last', padding='same',
                       use_bias=use_bias, name='conv%d' % idx)
    if idx == 1:
      # Only the first layer declares the input shape.
      conv_kwargs['input_shape'] = (32, 32, 3)
    model.add(BinaryConv2D(filters, **conv_kwargs))
    if pool_after:
      model.add(MaxPooling2D(pool_size=pool_size, name='pool%d' % idx,
                             data_format='channels_last'))
    model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1,
                                 name='bn%d' % idx))
    model.add(Activation(binary_tanh, name='act%d' % idx))

  model.add(Flatten())
  # Fully connected head: dense1/dense2 (4096 units, bn14/bn15, act14/act15).
  for idx in (14, 15):
    model.add(BinaryDense(4096, H=H,
                          kernel_lr_multiplier=kernel_lr_multiplier,
                          use_bias=use_bias, name='dense%d' % (idx - 13)))
    model.add(BatchNormalization(epsilon=epsilon, momentum=momentum,
                                 name='bn%d' % idx))
    model.add(Activation(binary_tanh, name='act%d' % idx))
  # Final logits layer (no activation; trailing BN only).
  model.add(BinaryDense(classes, H=H,
                        kernel_lr_multiplier=kernel_lr_multiplier,
                        use_bias=use_bias, name='dense3'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn16'))

  return model
예제 #5
0
# --- Data preprocessing ---
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
# Targets are one-hot vectors remapped to {-1, +1}, as required by
# hinge-style losses used with binarized networks.
Y_train = np_utils.to_categorical(y_train, classes) * 2 - 1 # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, classes) * 2 - 1


# --- Binarized CNN, channels_first layout ---
model = Sequential()
# conv1
model.add(BinaryConv2D(128, kernel_size=kernel_size, input_shape=(channels, img_rows, img_cols),
                       data_format='channels_first',
                       H=H, kernel_lr_multiplier=kernel_lr_multiplier, 
                       padding='same', use_bias=use_bias, name='conv1'))
# axis=1 is the channel axis for channels_first data.
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn1'))
model.add(Activation(binary_tanh, name='act1'))
# conv2
model.add(BinaryConv2D(128, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier, 
                       data_format='channels_first',
                       padding='same', use_bias=use_bias, name='conv2'))
# NOTE(review): this pooling layer does not pass data_format — it will use
# the Keras global default; confirm it matches 'channels_first'.
model.add(MaxPooling2D(pool_size=pool_size, name='pool2'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn2'))
model.add(Activation(binary_tanh, name='act2'))
# conv3
model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       data_format='channels_first',
                       padding='same', use_bias=use_bias, name='conv3'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn3'))
예제 #6
0
# Build ground-truth tensors for the streak-camera forward model:
# mask the raw frames, apply the numpy streak transform, then integrate
# (sum) over the temporal axis.
validate = mask(validate, ims, bk_temp)
validate2 = validate
validate2 = np_streak(validate)
validate3 = np.sum(validate2, axis=1)

model1 = Sequential()

# Fixed batch of 100 sequences, each 30 frames of 32x32x1 images.
model1.add(Input(shape=(30, 32, 32, 1), batch_size=100))

# One binary 32x32 convolution applied independently to every frame
# (TimeDistributed replicates the 2D layer across the 30 time steps).
model1.add(
    TimeDistributed(
        BinaryConv2D(1,
                     kernel_size=(32, 32),
                     input_shape=(30, 32, 32, 1),
                     data_format='channels_last',
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     padding='same',
                     use_bias=use_bias,
                     name='bin_conv_1')))
# NOTE(review): lr=1 is an unusually large Adam learning rate — presumably
# deliberate for binary weights; confirm.
model1.compile(optimizer=Adam(lr=1),
               loss='mean_squared_error',
               metrics=['mse'])
model1.summary()
history1 = model1.fit(ims, validate, batch_size=100, epochs=5, verbose=2)

model2 = Sequential()

model2.add(Input(shape=(30, 32, 32, 1), batch_size=100))
model2.add(
예제 #7
0
#Y_test = np_utils.to_categorical(y_test, classes) * 2 - 1
# Load 80 grayscale 256x256 images (../data/1.png .. 80.png) into one
# (80, 256, 256, 1) array; cv2.imread(..., 0) reads as single-channel.
X_train = np.zeros((80, 256, 256, 1))
for i in range(1, 81):
    im = cv2.imread('../data/' + str(i) + '.png', 0)
    im = np.reshape(im, (256, 256, 1))
    X_train[i - 1, :, :, :] = im

print(np.shape(X_train))

model = Sequential()

# Stand-alone binary convolution layer, built and called manually below
# rather than being added to the Sequential model.
cnn = BinaryConv2D(1,
                   kernel_size=kernel_size,
                   input_shape=(img_rows, img_cols, channels),
                   data_format='channels_last',
                   H=H,
                   kernel_lr_multiplier=kernel_lr_multiplier,
                   padding='same',
                   use_bias=use_bias,
                   name='conv1')
# NOTE(review): calling build()/call() directly bypasses the usual Keras
# layer lifecycle; cnn.call apparently returns (outputs, binarized kernel)
# here — confirm against the BinaryConv2D implementation.
bk = cnn.build((80, 256, 256, 1))
outputs, bk_temp = cnn.call(X_train)

print(np.shape(bk_temp))
# Show the first input image and (below) the first convolved output.
plt.figure(0)
plt.imshow(np.reshape(X_train[0], (256, 256)),
           interpolation='nearest',
           cmap='hot')
plt.figure(1)
plt.imshow(np.reshape(outputs[0], (256, 256)),
           interpolation='nearest',
def build_model(kernel_size, channels, img_rows, img_cols, H,
                kernel_lr_multiplier, use_bias, epsilon, momentum):
    """Build and compile a four-stage binarized CNN (channels_first).

    Each stage is BinaryConv2D -> MaxPooling2D -> BatchNormalization ->
    binary_tanh activation, followed by a 256-unit BinaryDense layer and a
    `classes`-way logits layer. Layer names (conv1..conv4, pool1..pool4,
    bn1..bn6, act1..act5, dense5, dense6) and layer order match the
    original hand-written version. Uses the module-level pool_size,
    classes, lr_start and binary_tanh.

    Returns:
        The compiled Sequential model (squared hinge loss, Adam).
    """
    model = Sequential()

    # Conv stages: filters per stage; every stage is pooled.
    for stage, filters in enumerate((128, 128, 128, 256), start=1):
        conv_kwargs = {
            'kernel_size': kernel_size,
            'H': H,
            'kernel_lr_multiplier': kernel_lr_multiplier,
            'data_format': 'channels_first',
            'padding': 'same',
            'use_bias': use_bias,
            'name': 'conv%d' % stage,
        }
        if stage == 1:
            # Only the first layer declares the input shape.
            conv_kwargs['input_shape'] = (channels, img_rows, img_cols)
        model.add(BinaryConv2D(filters, **conv_kwargs))
        model.add(
            MaxPooling2D(pool_size=pool_size,
                         name='pool%d' % stage,
                         data_format='channels_first'))
        # axis=1 is the channel axis for channels_first data.
        model.add(
            BatchNormalization(epsilon=epsilon,
                               momentum=momentum,
                               axis=1,
                               name='bn%d' % stage))
        model.add(Activation(binary_tanh, name='act%d' % stage))

    model.add(Flatten())
    # dense1 (named dense5 to continue the layer numbering)
    model.add(
        BinaryDense(256,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense5'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
    model.add(Activation(binary_tanh, name='act5'))
    # dense2: logits, no activation (trailing BN only).
    model.add(
        BinaryDense(classes,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense6'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))

    opt = Adam(lr=lr_start)
    model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])
    model.summary()
    return model
예제 #9
0
# --- Data preprocessing ---
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
# One-hot targets remapped to {-1, +1} for hinge-style losses.
Y_train = np_utils.to_categorical(y_train, nb_classes) * 2 - 1 # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, nb_classes) * 2 - 1


# NOTE(review): this model uses the legacy Keras 1 API (positional kernel
# dims, W_lr_multiplier, border_mode, bias) unlike the Keras 2 style used
# elsewhere in this file — confirm which Keras version is targeted.
model = Sequential()
# conv1
model.add(BinaryConv2D(128, 3, 3, input_shape=(nb_channel, img_rows, img_cols),
                              H=H, W_lr_multiplier=W_lr_multiplier, 
                              border_mode='same', bias=bias, name='conv1'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn1'))
model.add(Activation(binary_tanh, name='act1'))
# conv2
model.add(BinaryConv2D(128, 3, 3, H=H, W_lr_multiplier=W_lr_multiplier, 
                              border_mode='same', bias=bias, name='conv2'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn2'))
model.add(Activation(binary_tanh, name='act2'))
# conv3
model.add(BinaryConv2D(256, 3, 3, H=H, W_lr_multiplier=W_lr_multiplier,
                              border_mode='same', bias=bias, name='conv3'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn3'))
model.add(Activation(binary_tanh, name='act3'))
# conv4
# NOTE(review): a second, separate script starts here (the preprocessing
# repeats and `model` is rebound) — likely two concatenated examples.
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, classes) * 2 - 1 # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, classes) * 2 - 1


# MobileNet-style binarized model (depthwise blocks), channels_first.
model = Sequential()
# conv1
model.add(BinaryConv2D(32, kernel_size=kernel_size, input_shape=(channels, img_rows, img_cols),
                       data_format='channels_first',
                       H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       padding='same', use_bias=use_bias, name='conv1'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn1'))
model.add(Activation(binary_tanh, name='act1'))
##############################################################################################################

# conv_dw_2_1
model.add(BinaryConv2D(32, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       data_format='channels_first',
                       padding='same', use_bias=use_bias, name='conv_dw_2_1'))
#model.add(MaxPooling2D(pool_size=pool_size, name='pool2', data_format='channels_first'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='conv_dw_bn_2_1'))
model.add(Activation(binary_tanh, name='conv_dw_act_2_1'))

# conv_1x1_2_1
model.add(BinaryConv2D(32, kernel_size=mini_kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
예제 #11
0
def mse_loss(y_true, y_pred):
    """Mean squared error between predictions and targets (Keras backend)."""
    squared_error = K.square(y_pred - y_true)
    return K.mean(squared_error)
"""
Fine Tuning Parameters: 
Ratio between MSE and SSIM 
"""

lrs = [0.00001,0.0001,0.001,0.01]
mse_losses = []
ssim_losses = []

forward_model = Sequential()
forward_model.add(Input(shape=(30,32,32,1),batch_size = 32))
forward_model.add(TimeDistributed(BinaryConv2D(1, kernel_size=(32,32), input_shape=(30,32,32,1),
                       data_format='channels_last',
                       H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       padding='same', use_bias=use_bias, name='bin_conv_1')))
forward_model.add(Reshape((30,32,32)))
forward_model.add(Lambda(streak,output_shape=streak_output_shape))
forward_model.add(Lambda(integrate_ims, output_shape = integrate_ims_output_shape))
forward_model.add(Flatten())
forward_model.add(Dense(30720, activation = 'relu'))
forward_model.add(Reshape((30,32,32,1)))
forward_model.compile(optimizer = Nadam(0.0001), loss = custom_loss, metrics = ['mean_squared_error',mse_loss])

forward_model.load_weights('../data/model_stuff/forward_weights_4_12.h5')
binary_weights = forward_model.layers[0].get_weights()
inverse_weights = forward_model.layers[5].get_weights()

for i in range(4):
    inner_mse_losses=[]
예제 #12
0
# convert class vectors to binary class matrices
# One-hot targets remapped to {-1, +1}, as required by hinge-style losses.
Y_train = np_utils.to_categorical(y_train,
                                  classes) * 2 - 1  # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, classes) * 2 - 1

model = Sequential()  # This model is a linear stack of layers.

# conv1: binarized conv with L2 weight and L1 activity regularization.
model.add(
    BinaryConv2D(128,
                 kernel_size=kernel_size,
                 input_shape=(channels, img_rows, img_cols),
                 data_format='channels_first',
                 kernel_regularizer=regularizers.l2(0.01),
                 # Fixed: the original wrote `regularizer.l1`, a NameError —
                 # the Keras module is `regularizers` (as used just above).
                 activity_regularizer=regularizers.l1(0.01),
                 H=H,
                 kernel_lr_multiplier=kernel_lr_multiplier,
                 padding='same',
                 use_bias=use_bias,
                 name='conv1'))
# axis=1 is the channel axis for channels_first data.
model.add(
    BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn1'))
model.add(Activation(binary_tanh, name='act1'))

# conv2
model.add(
    BinaryConv2D(128,
                 kernel_size=kernel_size,
                 H=H,
                 kernel_lr_multiplier=kernel_lr_multiplier,
예제 #13
0
def Binary_Net(kernel_size, img_rows, img_cols, channels, data_format, H,
               kernel_lr_multiplier, use_bias, epsilon, momentum, classes,
               pool_size):
    """Build a four-conv binarized network ending in two BinaryDense layers.

    Stages: conv1 (128), conv2 (128, pooled), conv3 (256), conv4 (256,
    pooled), each followed by BatchNormalization (axis=-1) and a
    binary_tanh activation, then Flatten, a 1024-unit BinaryDense and a
    `classes`-way logits layer. Layer names and order match the original
    hand-written version exactly.

    Returns:
        The (uncompiled) Sequential model.
    """
    model = Sequential()

    # (filters, pool_after) for conv1..conv4.
    stages = [(128, False), (128, True), (256, False), (256, True)]
    for idx, (filters, pool_after) in enumerate(stages, start=1):
        conv_kwargs = {
            'kernel_size': kernel_size,
            'H': H,
            'kernel_lr_multiplier': kernel_lr_multiplier,
            'data_format': data_format,
            'padding': 'same',
            'use_bias': use_bias,
            'name': 'conv%d' % idx,
        }
        if idx == 1:
            # Only the first layer declares the input shape.
            conv_kwargs['input_shape'] = (img_rows, img_cols, channels)
        model.add(BinaryConv2D(filters, **conv_kwargs))
        if pool_after:
            model.add(
                MaxPooling2D(pool_size=pool_size,
                             name='pool%d' % idx,
                             data_format=data_format))
        model.add(
            BatchNormalization(epsilon=epsilon,
                               momentum=momentum,
                               axis=-1,
                               name='bn%d' % idx))
        model.add(Activation(binary_tanh, name='act%d' % idx))

    model.add(Flatten())
    # dense1 (named dense5 to continue the layer numbering).
    model.add(
        BinaryDense(1024,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense5'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
    model.add(Activation(binary_tanh, name='act5'))
    # dense2: logits, no activation (trailing BN only).
    model.add(
        BinaryDense(classes,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense6'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))
    return model