Code Example #1
File: ConvModel.py Project: naykun/MusicResearch
def get_conv1d_model_old(input_shape, output_shape):
    inputs = Input(shape=input_shape)
    xxx = inputs

    xxx = LocallyConnected1D(filters=m_filter_num, kernel_size=s_filter_num, padding='valid',
                             activation=default_activation, strides=1)(xxx)
    xxx = LocallyConnected1D(filters=l_filter_num, kernel_size=m_filter_num, padding='valid',
                             activation=default_activation, strides=1)(xxx)
    # we use max pooling:
    xxx = GlobalMaxPooling1D()(xxx)
    xxx = Dense(output_shape)(xxx)
    predictions = Activation('softmax')(xxx)
    model = Model(inputs=inputs, outputs=predictions)
    return model
Code Example #2
File: ConvModel.py Project: naykun/MusicResearch
 def get_current_melody_feature(current_melody):
     x = LocallyConnected1D(filters=16,
                            kernel_size=8,
                            padding='valid',
                            activation='relu',
                            strides=1)(current_melody)
     x = LocallyConnected1D(filters=32,
                            kernel_size=4,
                            padding='valid',
                            activation='relu',
                            strides=1)(x)
     x = GlobalMaxPooling1D()(x)
     x = Dense(input_unit_length, activation='sigmoid')(x)
     return x
Code Example #3
File: ConvModel.py Project: naykun/MusicResearch
def get_resNet_model_multiple_out(input_shape, output_shape, output_n):
    inputs = Input(shape=input_shape)
    xxx = inputs
    xxx = Conv1D(filters=xl_filter_num,
                 kernel_size=m_filter_num,
                 padding='same',
                 activation=None,
                 strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = Activation('relu')(xxx)
    xxx = MaxPooling1D(pool_size=1, padding='same', strides=1)(xxx)

    xxx = resnet_v1(input_shape,
                    num_classes=output_shape,
                    depth=3 * 6 + 2,
                    input_tensor=xxx,
                    local_conv=False)

    xxx = LocallyConnected1D(filters=l_filter_num,
                             kernel_size=m_filter_num,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = LocallyConnected1D(filters=l_filter_num,
                             kernel_size=m_filter_num,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = LocallyConnected1D(filters=xl_filter_num,
                             kernel_size=4,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)

    xxx = GlobalMaxPooling1D()(xxx)
    outs = []
    for i in range(output_n):
        tmp_x = xxx
        tmp_x = Dense(int(output_shape / output_n))(tmp_x)
        predictions = Activation('softmax')(tmp_x)
        outs.append(predictions)
    # outs = tf.Print(outs,[outs],'out softmax')
    # 'concatenate_1'
    predictions = concatenate(outs)

    model = Model(inputs=inputs, outputs=predictions)
    return model
Code Example #4
def create_model_LocallyConnected(input_shape,
                                  h1_unit=16,
                                  optimizer="adagrad",
                                  init="normal",
                                  h1_activation="relu"):
    # create model
    model = Sequential()
    model.add(
        LocallyConnected1D(h1_unit,
                           3,
                           padding="valid",  # Keras 2 name (was border_mode)
                           input_shape=input_shape))
    # model.add(LocallyConnected1D(h1_unit, 3, padding="valid", input_shape=input_shape))
    # model.add(AveragePooling1D())
    model.add(Flatten())
    # model.add(Dropout(0.5))
    model.add(
        Dense(1,
              kernel_initializer=init,  # Keras 2 name (was init=)
              activation='linear',
              activity_regularizer=regularizers.l2(0.01)))  # Keras 2 (was activity_l2)
    # Compile model
    model.compile(loss="mse", optimizer=optimizer)
    model.summary()  # Python 3 (original: `print model.summary()`)
    return model
Code Example #5
File: mcda.py Project: namletien/mcda
def make_wasserstein():
    model = Sequential()
    model.add(Dense(was_dim * 11, kernel_initializer='he_normal', input_shape = (mid_dim,)))
    model.add(LeakyReLU())
    model.add(Reshape((was_dim * 11, 1)))
    model.add(LocallyConnected1D(1, was_dim, strides = was_dim, kernel_initializer='he_normal'))
    return model
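A note on the block above (my reading of the code; was_dim and mid_dim are module-level globals not shown in this excerpt): the Dense layer emits was_dim * 11 units, the Reshape turns them into a one-channel sequence of length was_dim * 11, and the LocallyConnected1D with kernel_size=was_dim and strides=was_dim then applies 11 independent, unshared affine maps, one per non-overlapping block. A hedged shape check, with assumed values for the globals:

was_dim, mid_dim = 32, 64  # assumptions for illustration, not values from the source

model = make_wasserstein()
print(model.output_shape)  # (None, 11, 1): (32*11 - 32) // 32 + 1 = 11 unshared scores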
Code Example #6
File: mcda.py Project: namletien/mcda
def make_critics():
    model = Sequential()
    model.add(Dense(was_dim * 11, input_shape=(mid_dim, )))
    model.add(LeakyReLU())
    model.add(Reshape((was_dim * 11, 1)))
    model.add(LocallyConnected1D(1, was_dim, strides=was_dim))
    return model
Code Example #7
File: ConvModel.py Project: naykun/MusicResearch
 def get_melody_feature(main_melody):
     # Dense Attention or Conv?
     mask = Conv1D(filters=input_shape[1],
                   kernel_size=32,
                   padding='same',
                   activation='sigmoid',
                   strides=1,
                   dilation_rate=4)(main_melody)
     # x = merge([main_melody, mask], output_shape=input_shape_melody[0], name='attention_mul', mode='mul')
     x = multiply([main_melody, mask], name='attention_mul')
     x = LocallyConnected1D(filters=16,
                            kernel_size=32,
                            padding='valid',
                            activation='sigmoid',
                            strides=1)(x)
     x = Conv1D(filters=32,
                kernel_size=32,
                padding='same',
                activation='sigmoid',
                strides=1,
                dilation_rate=2)(x)
     x = Conv1D(filters=64,
                kernel_size=32,
                padding='same',
                activation='sigmoid',
                strides=1,
                dilation_rate=2)(x)
     x = GlobalMaxPooling1D()(x)
     x = Dense(melody_feature_length, activation='sigmoid')(x)
     return x
Code Example #8
File: ConvModel.py Project: naykun/MusicResearch
def resnet_layer_local(inputs,
                       num_filters=16,
                       kernel_size=3,
                       strides=1,
                       activation=default_activation,
                       batch_normalization=False,
                       conv_first=True):

    conv = LocallyConnected1D(num_filters,
                              kernel_size=kernel_size,
                              strides=strides,
                              padding='valid',
                              kernel_initializer='he_normal')
    x = inputs
    if conv_first:
        # import ipdb; ipdb.set_trace()
        x = same_padding_second_dim(x, padding_length=kernel_size, name=x.name.split('/')[0])(x)
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = same_padding_second_dim(x, padding_length=kernel_size, name=x.name.split('/')[0])(x)
        x = conv(x)
    return x
Code Example #9
 def conv1d_bn_local(x,
                     filters,
                     num_row,
                     num_col,
                     padding='valid',
                     strides=(1, 1),
                     name=None):
     strides = (1, 1)  # note: unconditionally overrides the strides argument (num_col is likewise unused)
     if name is not None:
         bn_name = name + '_bn'
         conv_name = name + '_conv'
     else:
         bn_name = None
         conv_name = None
     if K.image_data_format() == 'channels_first':
         bn_axis = 1
     else:
         bn_axis = 3
     print("****" * 10)
     print('before x:', x.shape, 'num row', num_row)
     x = same_padding_second_dim(x, padding_length=num_row, name=x.name.split('/')[0])(x)
     print('after x:', x.shape)
     bn_axis = 1  # note: overrides the data-format-dependent axis computed above
     x = LocallyConnected1D(
         filters,
         num_row,
         strides=strides[0],
         padding=padding,
         use_bias=False,
         name=conv_name)(x)
     print('after local conv:', x.shape)
     x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
     x = Activation('relu', name=name)(x)
     return x
Code Example #10
File: ConvModel.py Project: naykun/MusicResearch
def get_conv1d_model_b_small(input_shape, output_shape):
    inputs = Input(shape=input_shape)
    xxx = inputs
    if first_conv:
        xxx = Conv1D(filters=s_filter_num,
                     kernel_size=m_filter_size,
                     padding='same',
                     activation=default_activation,
                     strides=1,
                     dilation_rate=1)(xxx)

    xxx = LocallyConnected1D(filters=s_filter_num,
                             kernel_size=s_filter_size,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = Conv1D(filters=l_filter_num,
                 kernel_size=s_filter_num,
                 padding='valid',
                 activation=default_activation,
                 strides=1)(xxx)
    xxx = Conv1D(filters=xl_filter_num,
                 kernel_size=m_filter_num,
                 padding='valid',
                 activation=default_activation,
                 strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    # xxx = Activation(default_activation)(xxx)
    # we use max pooling:
    xxx = GlobalMaxPooling1D()(xxx)
    xxx = Dense(output_shape)(xxx)
    predictions = Activation('softmax')(xxx)
    model = Model(inputs=inputs, outputs=predictions)
    return model
Code Example #11
File: selu_net.py Project: ColaColin/Quadflor
def create_locally_connected_head_network(input_size, units, output_sizes, final_activations, verbose, loss_funcs = 'categorical_crossentropy', metrics=[], optimizer='adam'):
    # be nice to other processes that also use gpu memory by not monopolizing it on process start
    # in case of vram memory limitations on large networks it may be helpful to not set allow_growth and grab it all directly
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session, get_session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LocallyConnected1D, ZeroPadding1D, Reshape, Flatten
    from keras.models import Sequential
    from keras.optimizers import Adam
    from keras.layers.noise import AlphaDropout
    
    from keras.models import Model
    
    kinit = "lecun_normal";
    head = Sequential()
    
    head.add(Reshape((input_size, 1), input_shape=(input_size,)))
    
    dropoutRate = 0.1
    
    stride = 500 #this cannot just be changed without fixing padsNeeded to be more general
    padsNeeded = ((math.ceil(input_size / stride) + 1) * stride - input_size - 1) % stride
    if verbose: 
        print("Padding of %i zeros will be used to pad input of size %i to size %i" % (padsNeeded, input_size, input_size + padsNeeded))
    head.add(ZeroPadding1D(padding=(0, padsNeeded)))
    head.add(LocallyConnected1D(units[0], 1000, strides=stride, kernel_initializer=kinit))
    head.add(Activation("selu"))
    head.add(Flatten())
    
    for u_cnt in units[1:]:
        head.add(Dense(u_cnt, kernel_initializer=kinit))
        head.add(Activation("selu"))
        head.add(AlphaDropout(dropoutRate))

    inp = Input(shape=(input_size,))
    
    outputs = []
    
    headi = head(inp)
    
    for o, a in zip(output_sizes, final_activations):
        l = headi
        for oc, ac in zip(o, a):
            l = Dense(oc, activation=ac, kernel_initializer=kinit)(l) # in the original experiments the kernel initializer was not set correctly here. Likely no big change?
        outputs.append(l)

    model = Model(inputs=[inp], outputs=outputs)
    
    model.compile(loss=loss_funcs,
                  optimizer=optimizer,
                  metrics=metrics)
                  
    if verbose:
        model.summary()
    
    return model
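The padsNeeded expression above is hard-wired to stride = 500 and a kernel size of 1000, as the inline comment warns. A stride-general version would pad just enough that the last kernel window ends exactly at the padded length; a minimal sketch of that generalization (my code, not from the source):

import math

def pads_needed(input_size, kernel_size=1000, stride=500):
    # Windows needed for stride-spaced kernels to cover the input, then the
    # padded length those windows require; the difference is the padding.
    windows = max(1, math.ceil((input_size - kernel_size) / stride) + 1)
    padded = (windows - 1) * stride + kernel_size
    return max(0, padded - input_size)

For example, pads_needed(1200) returns 300, so two windows (positions 0-999 and 500-1499) exactly cover the padded length of 1500.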
Code Example #12
File: selu_net.py Project: ColaColin/Quadflor
def create_locally_connected_network(input_size, units, output_size, final_activation, verbose, loss_func = 'categorical_crossentropy', metrics=[], optimizer='adam', inner_local_units_size=50):
    # be nice to other processes that also use gpu memory by not monopolizing it on process start
    # in case of vram memory limitations on large networks it may be helpful to not set allow_growth and grab it all directly
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session, get_session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    
    from keras.layers import Input, Dense, Activation, Dropout, BatchNormalization, LocallyConnected1D, ZeroPadding1D, Reshape, Flatten
    from keras.models import Sequential
    from keras.optimizers import Adam
    from keras.layers.noise import AlphaDropout
    from keras.models import Model

    kinit = "lecun_normal";
    model = Sequential()
    
    model.add(Reshape((input_size, 1), input_shape=(input_size,)))
    
    dropoutRate = 0.1
    
    stride = 500 #this cannot just be changed without fixing padsNeeded to be more general
    padsNeeded = ((math.ceil(input_size / stride) + 1) * stride - input_size - 1) % stride
    model.add(ZeroPadding1D(padding=(0, padsNeeded)))
    model.add(LocallyConnected1D(units[0], 1000, strides=stride, kernel_initializer=kinit))
    model.add(Activation("selu"))
    
    for u_cnt in units[1:]:
        model.add(LocallyConnected1D(u_cnt, inner_local_units_size, strides=1, kernel_initializer=kinit))
        model.add(Activation("selu"))
        model.add(AlphaDropout(dropoutRate))

    model.add(Flatten())

    model.add(Dense(output_size, activation=final_activation, kernel_initializer=kinit))
   
    model.compile(loss=loss_func,
                  optimizer=optimizer,
                  metrics=metrics)
                  
    if verbose:
        model.summary()
    
    return model
Code Example #13
def localconv1d(x, filters, kernel_size, strides=1, use_bias=True, name=None):
    """LocallyConnected1D possibly wrapped by a TimeDistributed layer."""
    f = LocallyConnected1D(filters,
                           kernel_size,
                           strides=strides,
                           use_bias=use_bias,
                           name=name)

    return TimeDistributed(f, name=name)(x) if K.ndim(x) == 4 else f(x)
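A hedged usage sketch of the helper above (the tensor shapes are assumptions for illustration): a 3-D (batch, steps, channels) input goes through the layer directly, while a 4-D (batch, time, steps, channels) input triggers the TimeDistributed wrapper:

from keras.layers import Input

seq = Input(shape=(16, 1))         # ndim 3 -> plain LocallyConnected1D
stacked = Input(shape=(4, 16, 1))  # ndim 4 -> wrapped in TimeDistributed
a = localconv1d(seq, filters=8, kernel_size=3)      # -> (None, 14, 8)
b = localconv1d(stacked, filters=8, kernel_size=3)  # -> (None, 4, 14, 8)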
Code Example #14
def get_resnet_model(save_path, model_res=1024, image_size=256):
    # Build model
    if os.path.exists(save_path):
        print('Loading existing model')
        model = load_model(save_path)
    else:
        print('Building model')
        model_scale = int(
            2 * (math.log(model_res, 2) - 1))  # For example, 1024 -> 18
        resnet = ResNet50(include_top=False,
                          pooling=None,
                          weights='imagenet',
                          input_shape=(image_size, image_size, 3))
        model = Sequential()
        model.add(resnet)
        model.add(Conv2D(model_scale * 8,
                         1))  # scale down to correct # of parameters
        layer_size = model_scale * 8 * 8 * 8
        if is_square(layer_size):  # work out layer dimensions
            layer_l = int(math.sqrt(layer_size) + 0.5)
            layer_r = layer_l
        else:
            layer_m = math.log(math.sqrt(layer_size), 2)
            layer_l = 2**math.ceil(layer_m)
            layer_r = layer_size // layer_l
        layer_l = int(layer_l)
        layer_r = int(layer_r)
        model.add(
            Reshape((layer_l, layer_r))
        )  # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
        model.add(LocallyConnected1D(layer_r, 1, activation='elu'))
        model.add(Permute((2, 1)))
        model.add(LocallyConnected1D(layer_l, 1, activation='elu'))
        model.add(Permute((2, 1)))
        model.add(LocallyConnected1D(layer_r, 1, activation='elu'))
        model.add(Permute((2, 1)))
        model.add(LocallyConnected1D(layer_l, 1, activation='elu'))
        model.add(Reshape(
            (model_scale, 512)))  # train against all dlatent values

    model.compile(loss='logcosh', metrics=[],
                  optimizer='adam')  # Adam optimizer, logcosh used for loss.
    model.summary()
    return model
Code Example #15
File: NA-ANN.py Project: laurasteinmann/NA_ANN
def build_network(arc, drop_rate, LC, DG):
    def add_drops(model, drop_out, k):
        if DG[k].upper() == 'D':
            model.add(Dropout(drop_out[0]))
        elif DG[k].upper() == 'G':
            model.add(GaussianNoise(drop_out[k]))
        elif DG[k].upper() == "A":
            model.add(AlphaDropout(drop_out[k]))
        else:
            pass
        return model

    DG = DG.strip().split(",")
    arc = arc.strip().split(",")
    archit = []
    for layer in arc:
        archit.append(int(layer))
    layer_number = len(archit)
    drop_rate = drop_rate.strip().split(",")
    drop_out = []
    for drops in drop_rate:
        drop_out.append(float(drops))
    model = Sequential()
    if LC:
        model.add(
            Reshape(input_shape=(x_train.shape[1], x_train.shape[2]),
                    target_shape=(x_train.shape[1], x_train.shape[2]))
        )  #x.shape[2] the different shape of the encoding data
        model.add(
            LocallyConnected1D(
                1,
                10,
                strides=7,
                input_shape=(x_train.shape[1],
                             x_train.shape[2])))  #same as line 107
        model.add(Flatten())
        start = 0
        model = add_drops(model, drop_out, start)
    else:
        start = 0  # fix: `start` was undefined on this branch in the original
        model.add(
            Dense(archit[0],
                  kernel_initializer='truncated_normal',
                  activation=act,
                  input_shape=(x_train.shape[1],
                               x_train.shape[2])))  #same as line 107
        model = add_drops(model, drop_out, start)
    start = 1
    for k in range(start, len(archit)):
        model.add(
            Dense(archit[k],
                  kernel_initializer='truncated_normal',
                  activation=act))
        model = add_drops(model, drop_out, k)
    model.add(Dense(1, kernel_initializer='truncated_normal'))
    return (model)
Code Example #16
def build_DNN(p, coeff=0):

    input = Input(name='input', shape=(p, 2))
    show_layer_info('Input', input)

    local1 = LocallyConnected1D(filterNum,
                                1,
                                use_bias=bias,
                                kernel_initializer=Constant(value=0.1))(input)
    show_layer_info('LocallyConnected1D', local1)

    local2 = LocallyConnected1D(1,
                                1,
                                use_bias=bias,
                                kernel_initializer='glorot_normal')(local1)
    show_layer_info('LocallyConnected1D', local2)

    flat = Flatten()(local2)
    show_layer_info('Flatten', flat)

    dense1 = Dense(p,
                   activation=activation,
                   use_bias=bias,
                   kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1(coeff))(flat)
    show_layer_info('Dense', dense1)

    dense2 = Dense(p,
                   activation=activation,
                   use_bias=bias,
                   kernel_initializer='glorot_normal',
                   kernel_regularizer=regularizers.l1(coeff))(dense1)
    show_layer_info('Dense', dense2)

    out_ = Dense(1, activation='sigmoid',
                 kernel_initializer='glorot_normal')(dense2)
    show_layer_info('Dense', out_)

    model = Model(inputs=input, outputs=out_)
    # model.compile(loss='mse', optimizer='adam')
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
Code Example #17
def learn_slow_model(num_pixels=256, num_classes=10, initializer_val=100000):
    model = Sequential()
    model.add(
        LocallyConnected1D(
            num_pixels,
            3,
            input_shape=(num_pixels, 1),
            kernel_initializer=initializers.Constant(value=initializer_val),
            activation='elu'))
    model.add(
        LocallyConnected1D(
            128,
            3,
            kernel_initializer=initializers.Constant(value=initializer_val),
            activation='elu'))
    model.add(
        LocallyConnected1D(
            64,
            3,
            kernel_initializer=initializers.Constant(value=initializer_val),
            activation='elu'))
    model.add(
        LocallyConnected1D(
            32,
            3,
            kernel_initializer=initializers.Constant(value=initializer_val),
            activation='elu'))
    model.add(
        LocallyConnected1D(
            16,
            3,
            kernel_initializer=initializers.Constant(value=initializer_val),
            activation='relu'))
    model.add(Flatten())
    model.add(
        Dense(num_classes,
              kernel_initializer=initializers.Constant(value=initializer_val),
              activation='sigmoid'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
Code Example #18
def main():
    ext = extension_from_parameters()

    out_dim = 1
    loss = 'mse'
    metrics = None
    #metrics = ['accuracy'] if CATEGORICAL else None

    reshape = LOCALLY_CONNECTED_LAYERS is not None

    datagen = RegressionDataGenerator()
    train_gen = datagen.flow(batch_size=BATCH_SIZE, reshape=reshape)
    val_gen = datagen.flow(val=True, batch_size=BATCH_SIZE, reshape=reshape)
    val_gen2 = datagen.flow(val=True, batch_size=BATCH_SIZE, reshape=reshape)

    model = Sequential()

    if LOCALLY_CONNECTED_LAYERS:
        for layer in LOCALLY_CONNECTED_LAYERS:
            if layer:
                model.add(
                    LocallyConnected1D(*layer,
                                       input_shape=(datagen.input_dim, 1),
                                       activation=ACTIVATION))
                if POOL:
                    model.add(MaxPooling1D(pool_size=POOL))  # Keras 2 name (was pool_length)
        model.add(Flatten())

    for layer in DENSE_LAYERS:
        if layer:
            model.add(
                Dense(layer,
                      input_dim=datagen.input_dim,
                      activation=ACTIVATION))
            if DROP:
                model.add(Dropout(DROP))
    model.add(Dense(out_dim))

    model.summary()
    model.compile(loss=loss, optimizer='sgd', metrics=metrics)

    train_samples = int(datagen.n_train / BATCH_SIZE) * BATCH_SIZE
    val_samples = int(datagen.n_val / BATCH_SIZE) * BATCH_SIZE

    history = BestLossHistory(val_gen2, val_samples, ext)
    checkpointer = ModelCheckpoint(filepath='model' + ext + '.h5',
                                   save_best_only=True)

    # Keras 2 fit_generator API (original used Keras 1 sample counts / nb_epoch)
    model.fit_generator(train_gen,
                        steps_per_epoch=train_samples // BATCH_SIZE,
                        epochs=NB_EPOCH,
                        validation_data=val_gen,
                        validation_steps=val_samples // BATCH_SIZE,
                        callbacks=[history, checkpointer])
Code Example #19
File: models.py Project: albertotb/solar
def conv1D_lon_lat(idx_sensor, n_sensors=16):
    ''' Returns a model using all the sensors to predict index_sensor '''
    xin = Input(shape=(n_sensors, 1), name='lon_input')
    x = LocallyConnected1D(8, 7, data_format='channels_last',
                           padding='valid')(xin)
    x = Activation('relu')(x)
    x = LocallyConnected1D(16, 5, data_format='channels_last',
                           padding='valid')(x)
    x = Activation('relu')(x)
    x = Conv1D(32, 3, data_format='channels_last', padding='causal')(x)
    xl = Flatten()(x)

    yin = Input(shape=(n_sensors, 1), name='lat_input')
    y = LocallyConnected1D(8, 7, data_format='channels_last',
                           padding='valid')(yin)  # fix: the lat branch used xin/x by mistake
    y = Activation('relu')(y)
    y = LocallyConnected1D(16, 5, data_format='channels_last',
                           padding='valid')(y)
    y = Activation('relu')(y)
    y = Conv1D(32, 3, data_format='channels_last', padding='causal')(y)
    yl = Flatten()(y)

    xc = Concatenate()([xl, yl])
    xc = Dropout(0.2)(xc)
    xo = Dense(1)(xc)

    # use date info here?
    xinf = Flatten()(xin)
    s = Dense(5)(xinf)
    s = Activation('tanh')(s)
    s = Dense(2)(s)
    s = Activation('softmax')(s)

    # sort of residual connection
    xin_0 = Activation('relu')(xin)
    xin_1 = Lambda(lambda x: x[:, idx_sensor, :])(xin_0)
    xo_m = Dot(axes=1)([Concatenate()([xo, xin_1]), s])
    xo_m = Activation('relu')(xo_m)

    model = Model(inputs=[xin, yin], outputs=[xo_m])
    return model
Code Example #20
File: backprop.py Project: Detry322/map-creator
def backprop_single_image(generator, image):
    i = Input(shape=(100, 1))
    local = LocallyConnected1D(1, 1, use_bias=False)
    l = local(i)
    r = Reshape((100, ))(l)
    output = generator(r)
    model = Model(inputs=i, outputs=output)
    model.compile(loss='mean_squared_error', optimizer=SGD(lr=75))
    X = np.array([[[1.0]] * NOISE_SIZE])
    Y = np.array([image])
    model.fit(X, Y, epochs=200)
    return local.get_weights()
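A follow-up usage sketch (assuming NOISE_SIZE == 100, consistent with Input(shape=(100, 1)) above, and that generator and image come from the surrounding script): because the input is all ones, the fitted LocallyConnected1D weights are themselves the recovered latent vector:

weights = backprop_single_image(generator, image)
latent = weights[0].reshape(100)  # kernel shape (100, 1, 1) -> 100-d latent code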
Code Example #21
  def AddBranchLC1D(self, seqConvFilters, seqMaxPoolings, subsampling, seqKernelSizes, dropout, inputType, activation='relu'):
    self.fRequestedData.append(inputType)
    branchID = len(self.fTempModelBranches)

    inputLayer = Input(shape=self.fDataGenerator.GetDataTypeShape(inputType), name='B{:d}_LC1D_{:s}'.format(branchID, inputType))
    for i in range(0, len(seqConvFilters)):
      if i==0:
        model = LocallyConnected1D(seqConvFilters[i], seqKernelSizes[i], activation=activation, kernel_initializer=self.fInit, strides=subsampling, padding='valid', name=self.GetCompatibleName('B{:d}_LC1D_{:d}_Kernels{}_Stride_{:d}_activation_{:s}'.format(branchID, i, seqKernelSizes, subsampling, activation)))(inputLayer)
      else:
        model = LocallyConnected1D(seqConvFilters[i], seqKernelSizes[i], activation=activation, kernel_initializer=self.fInit, padding='valid', name=self.GetCompatibleName('B{:d}_LC1D_{:d}_Kernels{}_activation_{:s}'.format(branchID, i, seqKernelSizes, activation)))(model)

      if seqMaxPoolings[i] > 0:
        model = MaxPooling1D(pool_size=seqMaxPoolings[i], name='B{:d}_LC1D_{:d}_{}'.format(branchID, i, seqMaxPoolings[i]))(model)
      if dropout:
        model = Dropout(dropout, name='B{:d}_LC1D_{:d}_{:3.2f}'.format(branchID, i, dropout))(model)
    model = Flatten(name='B{:d}_LC1D_Output'.format(branchID))(model)

    self.fModelBranchesOutput.append('B{:d}_LC1D_Output'.format(branchID))
    self.fModelBranchesInput.append(inputLayer.name)
    self.fTempModelBranchesInput.append(inputLayer)
    self.fTempModelBranches.append(model)
Code Example #22
def stack2(data,
           target_shape,
           l2=1e-3,
           hid_size=16,
           hid_dropout=None,
           shared=True):
    model = Sequential()
    model.add(
        InputLayer(name='numeric_columns__',
                   input_shape=[len(data.numeric_columns)]))
    model.add(Reshape([-1, 6]))
    model.add(Permute((2, 1)))
    if shared:
        model.add(
            Conv1D(hid_size,
                   1,
                   kernel_regularizer=regularizers.l2(l2),
                   activation='relu'))
        if hid_dropout is not None:
            model.add(Dropout(hid_dropout))
        model.add(
            Conv1D(1,
                   1,
                   kernel_regularizer=regularizers.l2(l2),
                   activation='sigmoid'))
    else:
        model.add(
            LocallyConnected1D(hid_size,
                               1,
                               kernel_regularizer=regularizers.l2(l2),
                               activation='relu'))
        if hid_dropout is not None:
            model.add(Dropout(hid_dropout))
        model.add(
            LocallyConnected1D(1,
                               1,
                               kernel_regularizer=regularizers.l2(l2),
                               activation='sigmoid'))
    model.add(Reshape([6]))
    return model
Code Example #23
def learn_effec_model_mom2(num_pixels=256,
                           num_classes=10,
                           learning_rate=0.001,
                           momentum=0.9):
    model = Sequential()
    model.add(
        LocallyConnected1D(num_pixels,
                           3,
                           input_shape=(num_pixels, 1),
                           kernel_initializer='normal',
                           activation='elu'))
    model.add(
        LocallyConnected1D(128,
                           3,
                           kernel_initializer='normal',
                           activation='elu'))
    model.add(
        LocallyConnected1D(64,
                           3,
                           kernel_initializer='normal',
                           activation='elu'))
    model.add(
        LocallyConnected1D(32,
                           3,
                           kernel_initializer='normal',
                           activation='elu'))
    model.add(
        LocallyConnected1D(16,
                           3,
                           kernel_initializer='normal',
                           activation='relu'))
    model.add(Flatten())
    model.add(
        Dense(num_classes, kernel_initializer='normal', activation='sigmoid'))
    optimizer = optimizers.SGD(lr=learning_rate, momentum=momentum)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Code Example #24
def build_godard_ensemble(input_shape=(96, 96, 3), ensemble_size=2):
    inputs = Input(shape=input_shape)
    ensemble_outputs = [
        _godard(inputs, input_shape=input_shape) for i in range(ensemble_size)
    ]
    for i, x in enumerate(ensemble_outputs):
        x = Reshape((17, 1))(x)
        x = LocallyConnected1D(1, 1)(x)
        x = Reshape((17, ))(x)
        ensemble_outputs[i] = x

    outputs = average(ensemble_outputs)
    return Model(inputs=inputs, outputs=outputs)
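The Reshape -> LocallyConnected1D(1, 1) -> Reshape sandwich above amounts to a learned per-label affine recalibration of each ensemble member before averaging. In plain NumPy terms (a sketch of the computation, not code from the source):

import numpy as np

x = np.random.rand(17)  # one member's 17 label scores
w = np.random.rand(17)  # per-label weight learned by LocallyConnected1D(1, 1)
b = np.random.rand(17)  # per-label bias
y = w * x + b           # what the locally connected 1x1 layer computes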
Code Example #25
File: ConvModel.py Project: naykun/MusicResearch
def get_conv1d_model_a(input_shape, output_shape, timestep):
    print('input shape is', input_shape)
    inputs = Input(shape=input_shape)
    xxx = Reshape((input_shape[0], -1))(inputs)
    if first_conv:
        xxx = Conv1D(filters=s_filter_num,
                     kernel_size=m_filter_size,
                     padding='same',
                     activation=default_activation,
                     strides=1,
                     dilation_rate=1)(xxx)

    print(xxx.shape)
    xxx = LocallyConnected1D(filters=s_filter_num,
                             kernel_size=s_filter_size,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = LocallyConnected1D(filters=l_filter_num,
                             kernel_size=s_filter_num,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = LocallyConnected1D(filters=xl_filter_num,
                             kernel_size=m_filter_num,
                             padding='valid',
                             activation=default_activation,
                             strides=1)(xxx)
    xxx = BatchNormalization()(xxx)
    xxx = GlobalMaxPooling1D()(xxx)

    flatten_out_shape = output_shape[0] * output_shape[1]
    xxx = Dense(flatten_out_shape)(xxx)
    predictions = Reshape(output_shape)(xxx)

    model = Model(inputs=inputs, outputs=predictions)
    return model
Code Example #26
def loc_con_1d_model(filters, kernel_size, strides, drop_rate, dense_units1,
                     dense_units_final, optimizer, loss):
    """
    This function reads in various parameters to compiles a
    LocallyConnected1D model, consisting of various layers
    including Dropout, Flatten and Dense. The function returns
    the model summary.
    Input: Various parameters including filter size, kernel size,
    number of strides, x and y dimensional input values, dropout
    rate (for Dropout Layers), dense units (for Dense Layers) and
    the optimizer and loss methods for the model.compile function.
    Output: model summary (based on model.summary() object)
    """
    # make a global variable
    global model

    # import shape for inputs
    shape = data_shape(sequences)  # noqa: F821
    input_x = shape[1]
    input_y = shape[2]

    # initialize model
    model = Sequential()
    model.add(
        LocallyConnected1D(filters,
                           kernel_size,
                           strides=strides,
                           input_shape=(input_x, input_y),
                           activation='relu'))

    # additional layers
    #     model.add(Dense(50))
    #     model.add(Dropout(drop_rate))
    #     model.add(LocallyConnected1D(50, 15, activation='relu'))
    #     model.add(Dense(10))
    #     model.add(Dropout(drop_rate))
    #     model.add(Dense(10))
    #     model.add(Dropout(drop_rate))
    model.add(Dense(dense_units1))
    model.add(Dropout(drop_rate))

    # final flatten and dense layers
    model.add(Flatten())
    model.add(Dense(dense_units_final))

    # compile model
    model.compile(optimizer=optimizer, loss=loss, metrics=['mae', 'acc'])

    # return model and its summary (note: model.summary() prints and returns None)
    return (model, model.summary())
Code Example #27
File: models.py Project: albertotb/solar
def conv1D_lon_LSTM(n_steps=3, n_sensors=16):
    ''' Returns a model using all the sensors to predict index_sensor '''
    xin = Input(shape=(n_steps, n_sensors, 1), name='main_input')
    x = TimeDistributed(
        LocallyConnected1D(8,
                           7,
                           data_format='channels_last',
                           padding='valid',
                           activation='relu'))(xin)
    x = TimeDistributed(
        LocallyConnected1D(16,
                           5,
                           data_format='channels_last',
                           padding='valid',
                           activation='relu'))(x)
    x = TimeDistributed(
        Conv1D(32, 3, data_format='channels_last', padding='causal'))(x)
    xl = TimeDistributed(Flatten())(x)
    xl = LSTM(20)(xl)
    xl = Dropout(0.2)(xl)
    xo = Dense(n_sensors)(xl)

    model = Model(inputs=[xin], outputs=[xo])
    return model
Code Example #28
def make_critics():
    model = Sequential()
    model.add(
        Dense(was_dim * 3,
              input_shape=(mid_dim, ),
              activation='relu',
              kernel_regularizer=l2(tt)))
    model.add(Reshape((was_dim * 3, 1)))
    model.add(
        LocallyConnected1D(1,
                           was_dim,
                           strides=was_dim,
                           kernel_regularizer=l2(tt)))
    model.add(Reshape((3, )))
    return model
Code Example #29
def m3rsl_local_switch(seq_size, n_features):
    layers = list()
    layers.append(
        LocallyConnected1D(64,
                           kernel_size=5,
                           activation='selu',
                           padding='valid',
                           strides=1,
                           input_shape=(seq_size, n_features)))
    layers.append(AveragePooling1D(pool_size=4))
    layers.append(LSTM(70, kernel_regularizer=regularizers.l2(0.01)))
    layers.append(Activation('selu'))
    layers.append(Switch(1))
    layers.append(Dense(1, kernel_regularizer=regularizers.l2(0.01)))
    layers.append(Activation('sigmoid'))

    freezable = [0, 1]
    return layers, freezable
Code Example #30
File: delphes_AdaptMe.py Project: yangzch/DeepJet
def myDomAdaptModel(Inputs, nclasses, nregclasses, dropoutRate=0.05):

    X = Dense(60, activation='relu')(Inputs[0])  #reco inputs

    X = Dropout(dropoutRate)(X)
    X = Dense(60, activation='relu', name='classifier_dense0')(X)
    X = Dropout(dropoutRate)(X)
    X = Dense(60, activation='relu', name='classifier_dense1')(X)
    X = Dropout(dropoutRate)(X)
    X = Dense(60, activation='relu', name='classifier_dense2')(X)
    X = Dropout(dropoutRate)(X)
    Xa = Dense(20, activation='relu', name='classifier_dense3')(X)

    X = Dense(10, activation='relu', name='classifier_dense4')(Xa)
    #three labels
    labelpred = Dense(3, activation='softmax', name='classifier_pred')(X)

    Ad = GradientReversal(name='da_gradrev0')(Xa)
    Ad = Dense(30, activation='relu', name='da_dense0')(Ad)
    Ad = Dropout(dropoutRate)(Ad)  # fix: original dropped out X here, which was dead code
    Ad = Dense(30, activation='relu', name='da_dense1')(Ad)
    Ad = Dropout(dropoutRate)(Ad)  # likewise
    Ad = Dense(30, activation='relu', name='da_dense2')(Ad)
    Ad = Dense(1, activation='sigmoid')(Ad)

    #make list out of it, three labels from truth - make weights
    Weight = Reshape((3, 1), name='reshape')(Inputs[1])

    # one-by-one apply weight to label
    Weight = LocallyConnected1D(1,
                                1,
                                activation='linear',
                                use_bias=False,
                                name="weight0")(Weight)

    Weight = Flatten()(Weight)

    Weight = GradientReversal()(Weight)

    Ad = Concatenate(name='domada0')([Ad, Weight])

    predictions = [labelpred, Ad]
    model = Model(inputs=Inputs, outputs=predictions)
    return model