def create_time_model(Xtrain, Ttrain, Xtest, Ttest):
    def batch_generator(x, t):
        # Cycle through the samples one at a time, wrapping around at the end.
        i = 0
        while True:
            if i == len(x):
                i = 0
            xtrain, ytrain = x[i], t[i]
            i += 1
            yield xtrain, ytrain

    steps_per_epoch = len(Xtrain)
    val_steps = len(Xtest)
    model = Sequential()
    csv_logger = CSVLogger('time_log.csv', append=True, separator='\t')
    layer = conditional({{choice(['one', 'two'])}})
    if layer == 'two':
        returnseq = True
    else:
        returnseq = False
    model.add(
        LSTM(units={{choice([32, 64, 128, 256])}},
             input_shape=(None, Xtrain[0].shape[2]),
             kernel_regularizer=L2({{uniform(0, 1)}}),
             dropout={{uniform(0, 1)}},
             return_sequences=returnseq))
    if layer == 'two':
        model.add(
            LSTM(units={{choice([256, 512])}},
                 input_shape=(None, Xtrain[0].shape[2]),
                 kernel_regularizer=L2({{uniform(0, 1)}}),
                 dropout={{uniform(0, 1)}}))
    model.add(Dense({{choice([1024, 512])}}))
    model.add(Activation('relu'))
    model.add({{choice([Dropout(0.5), Activation('linear')])}})
    model.add(Dense(Ttrain[0].shape[1]))
    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['cosine'])
    model.summary()
    history = model.fit_generator(batch_generator(Xtrain, Ttrain),
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=5,
                                  callbacks=[csv_logger],
                                  verbose=2,
                                  validation_data=batch_generator(
                                      Xtest, Ttest),
                                  validation_steps=val_steps)
    score, acc = model.evaluate_generator(batch_generator(Xtest, Ttest),
                                          steps=val_steps)
    return {'loss': acc, 'model': model, 'status': STATUS_OK}
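The double-brace {{choice(...)}} and {{uniform(...)}} expressions above are hyperas search-space placeholders, so this function is meant to be handed to hyperas rather than called directly. A minimal driver sketch, assuming a hypothetical data() helper that returns Xtrain, Ttrain, Xtest, Ttest:

# Assumed hyperas driver for the search space above; `data` is a placeholder
# helper, not a function from the original project.
from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=create_time_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())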
Example #2
def _residual_drop(x, input_shape, output_shape, strides=(1, 1)):
  global add_tables
  nb_filter = output_shape[0]
  conv = Convolution2D(nb_filter, (3, 3), strides=strides, padding="same", kernel_regularizer=L2(weight_decay))(x)
  conv = BN(axis=1)(conv)
  conv = Activation("relu")(conv)
  conv = Convolution2D(nb_filter, (3, 3), padding="same", kernel_regularizer=L2(weight_decay))(conv)
  conv = BN(axis=1)(conv)
  if strides[0] >= 2:
      x = AveragePooling2D(strides)(x)
  if (output_shape[0] - input_shape[0]) > 0:
      pad_shape = (1,
                   output_shape[0] - input_shape[0],
                   output_shape[1],
                   output_shape[2])
      padding = K.zeros(pad_shape)
      padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
      x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                 output_shape=output_shape)(x)
  _death_rate = K.variable(death_rate)
  scale = K.ones_like(conv) - _death_rate
  conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                output_shape=output_shape)(conv)
  # residual merge: identity shortcut plus the (possibly dropped) conv branch
  out = add([x, conv])
  out = Activation("relu")(out)
  gate = K.variable(1, dtype="uint8")
  add_tables += [{"death_rate": _death_rate, "gate": gate}]
  return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                output_shape=output_shape)([out, x])
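The gate variables collected in add_tables are what make this a stochastic-depth block: during training each residual branch is meant to be switched off at random with probability death_rate, while the K.in_test_phase scaling above handles evaluation. A minimal per-batch gate scheduler (an assumed sketch, not code from the original project) could look like:

# Hypothetical callback driving the gates built by _residual_drop;
# it only relies on the global `add_tables` list defined above.
import numpy as np
from keras import backend as K
from keras.callbacks import Callback

class StochasticDepthGates(Callback):
    def on_batch_begin(self, batch, logs=None):
        for entry in add_tables:
            death_rate = K.get_value(entry["death_rate"])
            # keep the conv branch with probability 1 - death_rate,
            # otherwise fall back to the identity shortcut
            K.set_value(entry["gate"], int(np.random.rand() >= death_rate))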
Example #3
def build_residual_drop():
  """ ANCHOR """
  inputs = Input(shape=(3, 224, 224))
  net = Convolution2D(16, (3, 3), padding="same", kernel_regularizer=L2(weight_decay))(inputs)
  net = BN(axis=1)(net)
  net = Activation("relu")(net)
  #for i in range(18):
  net = _residual_drop(net, input_shape=(16, 32, 32), output_shape=(16, 32, 32))
  net = _residual_drop(net, input_shape=(16, 32, 32), output_shape=(16, 32, 32))
Example #4
def create_fix_model(Xtrain, Ttrain, Xtest, Ttest):
    csv_logger = CSVLogger('fix_log.csv', append=True, separator='\t')
    model = Sequential()
    layer = conditional({{choice(['one', 'two'])}})
    if layer == 'two':
        returnseq = True
    else:
        returnseq = False
    model.add(
        LSTM(units={{choice([32, 64, 128, 256])}},
             input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
             kernel_regularizer=L2({{uniform(0, 1)}}),
             dropout={{uniform(0, 1)}},
             return_sequences=returnseq))
    if layer == 'two':
        model.add(
            LSTM(units={{choice([256, 512])}},
                 input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
                 kernel_regularizer=L2({{uniform(0, 1)}}),
                 dropout={{uniform(0, 1)}}))
    model.add(Dense({{choice([1024, 512])}}))
    model.add(Activation('relu'))
    model.add({{choice([Dropout(0.5), Activation('linear')])}})
    model.add(Dense(Ttrain.shape[1]))
    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['cosine'])
    model.summary()
    model.fit(Xtrain,
              Ttrain,
              batch_size=50,
              epochs=5,
              callbacks=[csv_logger],
              verbose=2,
              validation_data=(Xtest, Ttest))
    score, acc = model.evaluate(Xtest, Ttest, verbose=0)
    return {'loss': acc, 'model': model, 'status': STATUS_OK}
Example #5
    def build_discriminator(self):
        reg_rate = self.reg_rate_D
        model = Sequential(name='discriminator')
        ########################################
        # input->hidden
        model.add(
            Dense(units=150,
                  input_dim=self.n_items,
                  activation='sigmoid',
                  use_bias=True,
                  kernel_initializer='random_uniform',
                  bias_initializer='random_uniform',
                  bias_regularizer=L2(reg_rate),
                  kernel_regularizer=L2(reg_rate)))
        # stacked hidden layers
        model.add(
            Dense(150,
                  activation='sigmoid',
                  use_bias=True,
                  kernel_initializer='random_uniform',
                  bias_initializer='random_uniform',
                  bias_regularizer=L2(reg_rate),
                  kernel_regularizer=L2(reg_rate)))
        model.add(
            Dense(150,
                  activation='sigmoid',
                  use_bias=True,
                  kernel_initializer='random_uniform',
                  bias_initializer='random_uniform',
                  bias_regularizer=L2(reg_rate),
                  kernel_regularizer=L2(reg_rate)))
        # hidden -> output
        model.add(
            Dense(units=1,
                  activation='sigmoid',
                  use_bias=True,
                  kernel_initializer='random_uniform',
                  bias_initializer='random_uniform',
                  bias_regularizer=L2(reg_rate),
                  kernel_regularizer=L2(reg_rate)))
        ########################################
        model.summary()

        input_profile = Input(shape=(self.n_items, ))
        validity = model(input_profile)
        return Model(input_profile, validity)
Example #6
File: layers.py  Project: alope107/MaLICE
    def build(self, input_shape):
        init_I_mean = tf.math.reduce_mean(self.init_I)
        init_I_std = tf.math.reduce_std(self.init_I)
        self.I_offset = self.add_weight(name='ref_I',
                                        shape=(self.n_res, ),
                                        initializer=Constant(0.001),
                                        constraint=MinMaxNorm(
                                            min_value=self.I_offset_lower,
                                            max_value=self.I_offset_upper,
                                            rate=1.0),
                                        trainable=True)

        self.dR2 = self.add_weight(
            shape=(1, ),
            #initializer=Constant(dR2),
            initializer=RandomUniform(minval=self.dR2_lower,
                                      maxval=self.dR2_upper),
            constraint=MinMaxNorm(min_value=self.dR2_lower,
                                  max_value=self.dR2_upper,
                                  rate=1.0),
            trainable=True)
        self.amp_scaler = self.add_weight(
            name='amp_scaler',
            shape=(1, ),
            #initializer=Constant(amp_scaler),#
            initializer=RandomNormal(mean=float(5 * init_I_mean),
                                     stddev=float(5 * init_I_std)),
            constraint=MinMaxNorm(min_value=self.amp_scaler_lower,
                                  max_value=self.amp_scaler_upper),
            trainable=True)

        self.delta_w = self.add_weight(name='delta_w',
                                       shape=(self.n_res, ),
                                       initializer=Constant(self.larmor / 100),
                                       constraint=MinMaxNorm(
                                           min_value=self.delta_w_lower,
                                           max_value=self.delta_w_upper,
                                           rate=1.0),
                                       regularizer=L2(1e-2),
                                       trainable=True)
Example #7
print(X_train.shape[0], ' training samples')
print(X_test.shape[0], ' testing samples')

# Convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, N_CLASSES)
Y_test = np_utils.to_categorical(y_test, N_CLASSES)

# N_CLASSES outputs, final stage is normalized via softmax
model = Sequential()
#1st layer
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED, )))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
#2nd layer
model.add(Dense(N_HIDDEN, kernel_regularizer=L2(0.01)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
#Output layer
model.add(Dense(N_CLASSES))
model.add(Activation('softmax'))
model.summary()

# Compile the model
model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=[METRIC])

# Train the model
history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE,
                    epochs=N_EPOCH,
                    verbose=VERBOSE)
Example #8
def build_model(hp):
  hidden_layers = hp.Choice('hidden_layers', values=[1,2,3])
  activation_choice = hp.Choice('activation', values=['relu', 'selu', 'elu'])
  model = Sequential()
  model.add(Dense(units=hp.Int('units', min_value=256, max_value=512, step=32),
                  activation=activation_choice,
                  input_shape=(x_train.shape[1], ),
                  kernel_regularizer=L2(0.001)))
  model.add(Dropout(0.3))
  for i in range(hidden_layers):
    model.add(Dense(units=hp.Int(f'layer_{i}_units_',
                                 min_value=32 // (i + 1),
                                 max_value=128 // (i + 1),
                                 step=64 // (i + 1)),
                    activation=activation_choice,
                    kernel_regularizer=L2(0.001)))
  model.add(Dense(1))
  model.compile(optimizer='rmsprop', loss="mse", metrics=["mae"])
  return model
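build_model is a Keras Tuner hypermodel function: the hp.Choice/hp.Int calls define the search space and the tuner invokes the function once per trial. A minimal driver sketch, assuming the keras_tuner package and that x_train/y_train are already defined:

# Assumed tuner driver; directory and project_name are placeholders,
# not taken from the original project.
import keras_tuner as kt

tuner = kt.RandomSearch(build_model,
                        objective='val_mae',
                        max_trials=10,
                        directory='tuner_logs',
                        project_name='regression')
tuner.search(x_train, y_train, epochs=20, validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]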
Example #9
time_steps = 150

filters = 60
kernel_size = 4
padding = 'valid'
strides = 1
pool_size = 2

dropout_rate = 0.5
learning_rate = 0.01
batch_size = 64
total_epoch = 20

activation_def = 'relu'
optimizer_def = Adam()
regularizer_def = L2(0.0001)

# New model
model = Sequential()

# LSTM Layer
# input_shape excludes the batch dimension, so only (time_steps, embedding_size) is given
model.add(LSTM(embedding_size, input_shape=(time_steps, embedding_size), return_sequences=True, kernel_regularizer=regularizer_def))
# model.add(LSTM(embedding_size, input_shape=(time_steps, embedding_size), kernel_regularizer=regularizer_def))

# 2 1-D Convolution Stage
model.add(Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation_def))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation_def))
model.add(MaxPooling1D(pool_size=pool_size))

model.add(Flatten())
Example #10
File: train.py  Project: xfreed/tOne-AI
                                                     ref_power=1e-5,
                                                     top_db=None)

        spec = np.clip(spec, 0, 100)
        np.save(spec_file, spec.astype('float16'), allow_pickle=False)

    # Define model
    logger.info('Constructing model...')

    input_shape = 1, MEL_BANDS, SEGMENT_LENGTH

    model = keras.models.Sequential()

    model.add(
        Conv(80, (3, 3),
             kernel_regularizer=L2(0.001),
             kernel_initializer='he_uniform',
             input_shape=input_shape))
    model.add(LeakyReLU())
    model.add(Pool((3, 3), (3, 3)))

    model.add(
        Conv(160, (3, 3),
             kernel_regularizer=L2(0.001),
             kernel_initializer='he_uniform'))
    model.add(LeakyReLU())
    model.add(Pool((3, 3), (3, 3)))

    model.add(
        Conv(240, (3, 3),
             kernel_regularizer=L2(0.001),
Example #11
def main():
    train_set, total_train_text = load_text(True)
    test_set, total_test_text = load_text(False)
    print(len(total_train_text))

    if exist('tokenizer'):
        tokenizer = load_obj('tokenizer')
        print('load tokenizer')
    else:
        tokenizer = Tokenizer(VOCA_SIZE)
        tokenizer.fit_on_texts(total_train_text)
        save_obj(tokenizer, 'tokenizer')
        print('save tokenizer')

    if exist('embedding_mat'):
        embedding_mat = load_obj('embedding_mat')
    else:
        embedding_mat = make_embedding(tokenizer, VOCA_SIZE, EM_DIM)
        save_obj(embedding_mat, 'embedding_mat')

    if exist('total_data'):
        print('import total data')
        total_data = load_obj('total_data')
    else:
        train_titles = []
        train_texts = []
        train_labels = []
        total_stock = stock_data()

        for i in range(10):
            print(i)
            train_titles += train_set[i][1]
            train_texts += train_set[i][2]
            dates = train_set[i][0]

            stock, sdate, updown = zip(*total_stock[i])
            date2updown = dict(zip(sdate, updown))
            labels = list(map(lambda d: date2updown[d], dates))

            train_labels += labels

        titles_seq = pad_sequences(tokenizer.texts_to_sequences(train_titles),
                                   maxlen=TITLE_LEN)
        texts_seq = pad_sequences(tokenizer.texts_to_sequences(train_texts),
                                  maxlen=TEXT_LEN)

        test_titles = []
        test_texts = []
        test_labels = []
        for i in range(10):
            dates, titles, texts = test_set[i]
            titles_seq = pad_sequences(tokenizer.texts_to_sequences(titles),
                                       maxlen=TITLE_LEN)
            texts_seq = pad_sequences(tokenizer.texts_to_sequences(texts),
                                      maxlen=TEXT_LEN)

            stock, sdate, updown = zip(*total_stock[i])
            date2updown = dict(zip(sdate, updown))
            labels = list(map(lambda d: date2updown[d], dates))
            test_titles += titles_seq
            test_texts += texts_seq
            test_labels += labels

        total_data = [
            train_titles, train_texts, train_labels, test_titles, test_texts,
            test_labels
        ]
        save_obj(total_data, 'total_data')
        '''
        for t in titles:
            t_seq = pad_sequences(tokenizer.texts_to_sequences(t), maxlen=TITLE_LEN)
            titles_seq.append(t_seq)
        for t in texts:
            t_seq = pad_sequences(tokenizer.texts_to_sequences(t), maxlen=TEXT_LEN)
            texts_seq.append(t_seq)

        '''

    n_symbols = len(embedding_mat)
    print(n_symbols)

    print('total:', len(total_data))
    train_data = total_data[:-test_sample]
    test_data = total_data[-test_sample:]
    random.shuffle(train_data)

    train_x1, train_x2, train_y = zip(*train_data)
    test_x1, test_x2, test_y = zip(*test_data)
    train_x1 = np.array(train_x1)
    train_x2 = np.array(train_x2)
    train_y = np.array(train_y)
    test_x1 = np.array(test_x1)
    test_x2 = np.array(test_x2)
    test_y = np.array(test_y)

    ## model ##
    K.clear_session()
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=config)
    set_session(sess)

    print(embedding_mat.shape)

    ## A model ##
    model_A = Sequential()
    model_A.add(Embedding(output_dim = EM_DIM, input_dim = n_symbols, \
            weights = [embedding_mat], input_length = TITLE_LEN, \
            trainable=True))
    model_A.add(BatchNormalization())
    model_A.add(LSTM(hidden_dim1, return_sequences=True))
    #model_A.add(BatchNormalization())
    model_A.add(Reshape((TITLE_LEN, hidden_dim1, 1)))

    model_A1 = Sequential()
    model_A2 = Sequential()
    model_A3 = Sequential()
    model_A1.add(model_A)
    model_A2.add(model_A)
    model_A3.add(model_A)

    model_A1.add(Conv2D(num_filters, kernel_size=(filter_sizes[0], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))
    model_A2.add(Conv2D(num_filters, kernel_size=(filter_sizes[1], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))
    model_A3.add(Conv2D(num_filters, kernel_size=(filter_sizes[2], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))

    model_A1.add(BatchNormalization())
    model_A2.add(BatchNormalization())
    model_A3.add(BatchNormalization())

    model_A1.add(LeakyReLU(0.3))
    model_A2.add(LeakyReLU(0.3))
    model_A3.add(LeakyReLU(0.3))

    model_A1.add(MaxPool2D(pool_size=(TITLE_LEN - filter_sizes[0] + 1, 1), \
            strides=(1,1), padding='valid'))
    model_A2.add(MaxPool2D(pool_size=(TITLE_LEN - filter_sizes[1] + 1, 1), \
            strides=(1,1), padding='valid'))
    model_A3.add(MaxPool2D(pool_size=(TITLE_LEN - filter_sizes[2] + 1, 1), \
            strides=(1,1), padding='valid'))

    input_A = Input(shape=(TITLE_LEN, ))
    out_A1 = model_A1(input_A)
    out_A2 = model_A2(input_A)
    out_A3 = model_A3(input_A)

    ## B model ##
    model_B = Sequential()
    model_B.add(Embedding(output_dim = EM_DIM, input_dim = n_symbols, \
            weights = [embedding_mat], input_length = TEXT_LEN, \
            trainable=True))
    model_B.add(BatchNormalization())
    model_B.add(LSTM(hidden_dim1, return_sequences=True))
    #model_A.add(BatchNormalization())
    model_B.add(Reshape((TEXT_LEN, hidden_dim1, 1)))

    model_B1 = Sequential()
    model_B2 = Sequential()
    model_B3 = Sequential()
    model_B1.add(model_B)
    model_B2.add(model_B)
    model_B3.add(model_B)

    model_B1.add(Conv2D(num_filters, kernel_size=(filter_sizes[0], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))
    model_B2.add(Conv2D(num_filters, kernel_size=(filter_sizes[1], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))
    model_B3.add(Conv2D(num_filters, kernel_size=(filter_sizes[2], hidden_dim1), \
            padding='valid', kernel_initializer='normal',\
            kernel_regularizer=L2(beta)))

    model_B1.add(BatchNormalization())
    model_B2.add(BatchNormalization())
    model_B3.add(BatchNormalization())

    model_B1.add(LeakyReLU(0.3))
    model_B2.add(LeakyReLU(0.3))
    model_B3.add(LeakyReLU(0.3))

    model_B1.add(MaxPool2D(pool_size=(TEXT_LEN - filter_sizes[0] + 1, 1), \
            strides=(1,1), padding='valid'))
    model_B2.add(MaxPool2D(pool_size=(TEXT_LEN - filter_sizes[1] + 1, 1), \
            strides=(1,1), padding='valid'))
    model_B3.add(MaxPool2D(pool_size=(TEXT_LEN - filter_sizes[2] + 1, 1), \
            strides=(1,1), padding='valid'))

    input_B = Input(shape=(TEXT_LEN, ))
    out_B1 = model_B1(input_B)
    out_B2 = model_B2(input_B)
    out_B3 = model_B3(input_B)

    concatenated_tensor = Concatenate(axis=3)(
        [out_A1, out_A2, out_A3, out_B1, out_B2, out_B3])
    flatten = Flatten()(concatenated_tensor)
    '''
    dropout = Dropout(drop)(flatten)
    fc = Dense(hidden_dim2, kernel_regularizer=L2(beta))(dropout)
    bn = BatchNormalization()(fc)
    flatten = LeakyReLU(0.3)(bn)
    '''

    output = Dense(1, activation='sigmoid',
                   kernel_regularizer=L2(beta))(flatten)

    final_model = Model(inputs=(input_A, input_B), outputs=output)
    adam = optimizers.Adam(lr=learning_rate,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-8,
                           decay=0.0)
    final_model.compile(loss='binary_crossentropy',
                        optimizer=adam,
                        metrics=['accuracy'])

    final_model.summary()

    #early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)

    final_model.fit([train_x1, train_x2], train_y, batch_size = bs, epochs = ne, \
            verbose=1, validation_data=([test_x1, test_x2], test_y))#, callbacks=[early_stop])
Example #12
network.add(layers.Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='relu', input_shape=input_shape, padding="same"))
network.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'))
network.add(layers.Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid'))
network.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
network.add(layers.Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid'))
network.add(layers.Flatten())
network.add(layers.Dense(84, activation='relu'))
'''

network.add(
    layers.Conv2D(Channel,
                  kernel_size=Nf,
                  strides=Stride,
                  activation='relu',
                  input_shape=input_shape,
                  kernel_regularizer=L2(l2_r)))

#dropout: randomly switches off some neurons
indexlayer = 0
if dropout:
    network.add(layers.Dropout(dropout_const))
    indexlayer += 1
network.add(layers.Flatten())
indexlayer += 1

# network.add(layers.Dense(N, activation='relu'))

network.add(layers.Dense(N, activation='softmax', kernel_regularizer=L2(l2_r)))

network.summary()  # Shows information about layers and parameters of the entire network
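The snippet ends at the summary call, before compilation. A minimal assumed continuation (optimizer, loss and the x_train/y_train names are placeholders, not taken from the original script) for a softmax classifier like this one:

# Assumed compile/fit step for the network defined above.
network.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
history = network.fit(x_train, y_train,
                      epochs=10,
                      batch_size=64,
                      validation_split=0.1)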
Example #13
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # Ignore log10 zero division
            spec = librosa.core.perceptual_weighting(spec, MEL_FREQS, amin=1e-5, ref_power=1e-5,
                                                     top_db=None)

        spec = np.clip(spec, 0, 100)
        np.save(spec_file, spec.astype('float16'), allow_pickle=False)

    # Define model
    logger.info('Constructing model...')

    input_shape = 1, MEL_BANDS, SEGMENT_LENGTH

    model = keras.models.Sequential()

    model.add(Conv(80, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform',
                   input_shape=input_shape))
    model.add(LeakyReLU())
    model.add(Pool((3, 3), (3, 3)))

    model.add(Conv(160, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform'))
    model.add(LeakyReLU())
    model.add(Pool((3, 3), (3, 3)))

    model.add(Conv(240, (3, 3), kernel_regularizer=L2(0.001), kernel_initializer='he_uniform'))
    model.add(LeakyReLU())
    model.add(Pool((3, 3), (3, 3)))

    model.add(Flatten())
    model.add(Dropout(0.5))