Example #1
 def _get_compiled_multi_io_model(self):
     model = get_multi_io_model()
     model.compile(
         optimizer='rmsprop',
         loss=losses.MeanSquaredError(),
         metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
         weighted_metrics=[
             metrics.MeanSquaredError(name='mean_squared_error_2')
         ])
     return model
Example #2
 def _get_compiled_multi_io_model(self):
   model = get_multi_io_model()
   model.compile(
       optimizer='rmsprop',
       loss='mse',
       metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
       weighted_metrics=[
           metrics.MeanSquaredError(name='mean_squared_error_2')
       ],
       run_eagerly=test_utils.should_run_eagerly())
   return model
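Examples #1 and #2 both rely on a get_multi_io_model helper defined in the surrounding test file. As a hedged sketch only (the real helper may differ in layer sizes and wiring), a multi input/output functional model of this shape can be built like so:

    from tensorflow import keras
    from tensorflow.keras import layers

    def get_multi_io_model():
        # Two independent inputs, each mapped to its own output head,
        # so compile() tracks metrics per output.
        inp_1 = keras.Input(shape=(1,), name='input_1')
        inp_2 = keras.Input(shape=(1,), name='input_2')
        out_1 = layers.Dense(1, name='output_1')(inp_1)
        out_2 = layers.Dense(1, name='output_2')(inp_2)
        return keras.Model([inp_1, inp_2], [out_1, out_2])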
Example #3
 def _get_compiled_multi_io_model(self):
     model = get_multi_io_model()
     model.compile(
         optimizer="rmsprop",
         loss="mse",
         metrics=[metrics.MeanSquaredError(name="mean_squared_error")],
         weighted_metrics=[
             metrics.MeanSquaredError(name="mean_squared_error_2")
         ],
         run_eagerly=test_utils.should_run_eagerly(),
     )
     return model
Example #4
 def _get_model(self):
   x = layers.Dense(3, kernel_initializer='ones', trainable=False)
   out = layers.Dense(
       1, kernel_initializer='ones', name='output', trainable=False)
   model = test_utils.get_model_from_layers([x, out], input_shape=(1,))
   model.compile(
       optimizer='rmsprop',
       loss='mse',
       metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
       weighted_metrics=[
           metrics.MeanSquaredError(name='mean_squared_error_2')
       ],
       run_eagerly=test_utils.should_run_eagerly())
   return model
Example #5
 def _get_model(self):
     x = layers.Dense(3, kernel_initializer="ones", trainable=False)
     out = layers.Dense(1,
                        kernel_initializer="ones",
                        name="output",
                        trainable=False)
     model = test_utils.get_model_from_layers([x, out], input_shape=(1, ))
     model.compile(
         optimizer="rmsprop",
         loss="mse",
         metrics=[metrics.MeanSquaredError(name="mean_squared_error")],
         weighted_metrics=[
             metrics.MeanSquaredError(name="mean_squared_error_2")
         ],
         run_eagerly=test_utils.should_run_eagerly(),
     )
     return model
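Examples #4 and #5 build the same tiny frozen model through test_utils.get_model_from_layers. A hedged standalone equivalent, assuming that helper simply stacks the given layers on a (1,)-shaped input:

    from tensorflow import keras
    from tensorflow.keras import layers, metrics

    model = keras.Sequential([
        layers.Dense(3, kernel_initializer='ones', trainable=False,
                     input_shape=(1,)),
        layers.Dense(1, kernel_initializer='ones', name='output',
                     trainable=False),
    ])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
        weighted_metrics=[
            metrics.MeanSquaredError(name='mean_squared_error_2')
        ])

With every layer frozen and initialized to ones, predictions are deterministic, which makes the expected metric values easy to assert on.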
Example #6
 def test_weighted(self):
     mse_obj = metrics.MeanSquaredError()
     y_true = K.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
     y_pred = K.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                          (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
     sample_weight = K.constant((1., 1.5, 2., 2.5))
     result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
     assert np.allclose(0.54285, K.eval(result), atol=1e-5)
Example #7
    def test_unweighted(self):
        mse_obj = metrics.MeanSquaredError()
        y_true = K.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                             (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
        y_pred = K.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                             (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))

        result = mse_obj(y_true, y_pred)
        assert np.allclose(0.5, K.eval(result), atol=1e-5)
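The expected constants in Examples #6 and #7 can be checked by hand: each row reduces to a per-sample MSE of 0.4, 0.4, 0.4 and 0.8, whose plain mean is 0.5 and whose weighted mean with weights (1, 1.5, 2, 2.5) is 3.8 / 7 ≈ 0.54286. A minimal NumPy check of that arithmetic (a sketch of the reduction, not Keras' internal code path):

    import numpy as np

    y_true = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 1],
                       [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]], dtype=float)
    y_pred = np.array([[0, 0, 1, 1, 0], [1, 1, 1, 1, 1],
                       [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]], dtype=float)
    weights = np.array([1.0, 1.5, 2.0, 2.5])

    per_sample = ((y_true - y_pred) ** 2).mean(axis=1)  # [0.4, 0.4, 0.4, 0.8]
    assert np.isclose(per_sample.mean(), 0.5)
    assert np.isclose((per_sample * weights).sum() / weights.sum(),
                      0.54285, atol=1e-5)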
Example #8
    def test_config(self):
        mse_obj = metrics.MeanSquaredError(name='my_mse', dtype='int32')
        assert mse_obj.name == 'my_mse'
        assert mse_obj.dtype == 'int32'

        # Check save and restore config
        mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
        assert mse_obj2.name == 'my_mse'
        assert mse_obj2.dtype == 'int32'
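The same round trip can also go through the serialize/deserialize helpers in keras.metrics, which wrap get_config/from_config (a hedged alternative shown here, not what the test itself calls):

    from tensorflow.keras import metrics

    mse_obj = metrics.MeanSquaredError(name='my_mse', dtype='int32')
    restored = metrics.deserialize(metrics.serialize(mse_obj))
    assert restored.name == 'my_mse'
    assert restored.dtype == 'int32'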
Example #9
 def test_weighted(self):
     mse_obj = metrics.MeanSquaredError()
     y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
               (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
     y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
               (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
     sample_weight = (1., 1.5, 2., 2.5)
     result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
     assert np.isclose(0.54285, K.eval(result), atol=1e-5)
Example #10
    def test_unweighted(self):
        mse_obj = metrics.MeanSquaredError()
        y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                  (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
        y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                  (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))

        result = mse_obj(y_true, y_pred)
        assert np.isclose(0.5, K.eval(result), atol=1e-5)
Example #11
         'use_bias': True,
         'kernel_initializer': 'Zeros',
         'kernel_regularizer': None,
         'bias_regularizer': None,
         'activity_regularizer': None,
         'kernel_constraint': None,
         'bias_constraint': None
     },
 },
 'model': {
     'compilation': {
         'optimizer': optimizers.Adam(learning_rate=0.001),
         'loss': losses.MeanSquaredError(),
         'metrics': [metrics.MeanSquaredError(), metrics.MeanAbsoluteError()],
     }
 },
 'fit': {
     'batch_size': 32,
     'epochs': 100,
     'callbacks': [
         callbacks.EarlyStopping(monitor='loss', patience=7),
         callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10)
     ]
 },
 'Monte-Carlo': {
     'temperature': 100,
     'Number_of_steps': 100000,
     'box_size': 0.02
 }
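Example #11 is a fragment of the hyperparameter dict shown in full in Example #13. A hedged sketch of how such a dict is typically consumed, with a toy model and random data standing in for the real ones (layer sizes, shapes, and epoch count here are illustrative only):

    import numpy as np
    from tensorflow.keras import (Sequential, callbacks, layers, losses,
                                  metrics, optimizers)

    params = {
        'model': {'compilation': {
            'optimizer': optimizers.Adam(learning_rate=0.001),
            'loss': losses.MeanSquaredError(),
            'metrics': [metrics.MeanSquaredError(),
                        metrics.MeanAbsoluteError()],
        }},
        'fit': {'batch_size': 32, 'epochs': 5,
                'callbacks': [callbacks.EarlyStopping(monitor='loss',
                                                      patience=7)]},
    }

    model = Sequential([layers.Dense(1, input_shape=(4,))])
    model.compile(**params['model']['compilation'])
    model.fit(np.random.rand(64, 4), np.random.rand(64, 1),
              verbose=0, **params['fit'])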
Example #12
def models(labelarr, imgarr):
    pretrainmodel = load_model(
        '/home/som/lab/seed-yzj/newpaper4/laboratory/model/labotary_nose_att.hdf5',
        compile=False)
    pretrainmodel.compile(optimizer=Adam(learning_rate=1e-4),
                          loss=losses.mean_squared_error,
                          metrics=[
                              metrics.MeanAbsoluteError(),
                              metrics.MeanAbsolutePercentageError(),
                              metrics.RootMeanSquaredError(), pearson_r
                          ])

    x_train, x_test, y_train, y_test = train_test_split(imgarr,
                                                        labelarr,
                                                        test_size=0.4,
                                                        random_state=3)
    testlen = x_test.shape[0]
    x_val = x_test[:int(testlen / 2)]
    y_val = y_test[:int(testlen / 2)]
    x_test = x_test[int(testlen / 2):]
    y_test = y_test[int(testlen / 2):]

    # network input (renamed from "input" so it does not shadow the Python builtin)
    inputs = Input(shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3]))

    x = ZeroPadding2D((3, 3))(inputs)
    x = Conv2d_BN(x,
                  nb_filter=64,
                  kernel_size=(7, 7),
                  strides=(2, 2),
                  padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    # (56,56,64)
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    # (28,28,128)
    x = Conv_Block(x,
                   nb_filter=128,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    # (14,14,256)
    x = Conv_Block(x,
                   nb_filter=256,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    # (7,7,512)
    x = Conv_Block(x,
                   nb_filter=512,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = AveragePooling2D(pool_size=(3, 3))(x)
    x = Flatten()(x)

    fc6 = Dense(units=labelarr.shape[1],
                weights=pretrainmodel.get_layer('dense_27').get_weights(),
                trainable=False)(x)

    model = Model(inputs=inputs, outputs=fc6)
    model.summary()

    # model = multi_gpu_model(model, gpus=4)

    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=losses.mean_squared_error,
                  metrics=[
                      metrics.MeanSquaredError(),
                      metrics.MeanAbsoluteError(),
                      metrics.MeanAbsolutePercentageError(),
                      metrics.RootMeanSquaredError(), pearson_r
                  ])

    model_checkpoint = ModelCheckpoint(
        '/home/som/lab/seed-yzj/newpaper4/laboratory/model/labotary_nose_thermal_att.hdf5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  patience=15,
                                  mode='auto',
                                  factor=0.7,
                                  min_lr=1e-6)

    # csv_logger = CSVLogger('training.csv')

    model.fit(x_train,
              y_train,
              batch_size=32,
              epochs=200,
              verbose=1,
              callbacks=[model_checkpoint, reduce_lr],
              validation_data=(x_val, y_val))

    return 0
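The fc6 head above copies weights from a named layer of the pretrained model and freezes them. The same transfer pattern in isolation, with a synthetic source layer (names and shapes here are illustrative; set_weights requires the shapes to match exactly):

    from tensorflow.keras import layers

    src = layers.Dense(units=3)   # stand-in for pretrainmodel.get_layer('dense_27')
    src.build((None, 512))

    head = layers.Dense(units=3)
    head.build((None, 512))       # must match the source layer's input width
    head.set_weights(src.get_weights())
    head.trainable = False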
Example #13
def objective(target):

    params = {
        'dataset': 'zundel_100k',
        'dataset_size_limit': 100000,
        'soap': {
            # https://singroup.github.io/dscribe/latest/tutorials/soap.html
            'sigma': 1,  # initial: 0.01
            'nmax': 5,  # 3
            'lmax': 2,  # 3
            'rcut': 9  # 7
        },
        'pca': {
            # https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
            'variance': 0.999999
        },
        'scalers': {
            # https://scikit-learn.org/stable/modules/preprocessing.html
            'desc_scaler_type': StandardScaler,
            'energies_scaler': MinMaxScaler()
        },
        'train_set_size_ratio': 0.6,
        'validation_set_size_ratio': 0.2,
        'submodel': {
            # https://keras.io/guides/sequential_model/
            'hidden_layers': {
                'units': 30,
                'activation': 'tanh',
                'use_bias': True,
                'kernel_initializer': 'GlorotUniform',
                'kernel_regularizer': None,
                'bias_regularizer': 'l2',
                'activity_regularizer': None,
                'kernel_constraint': None,
                'bias_constraint': None
            },
            'output_layer': {
                'activation': 'linear',
                'use_bias': True,
                'kernel_initializer': 'Zeros',
                'kernel_regularizer': None,
                'bias_regularizer': None,
                'activity_regularizer': None,
                'kernel_constraint': None,
                'bias_constraint': None
            },
        },
        'model': {
            'compilation': {
                'optimizer': optimizers.Adam(learning_rate=0.001),
                'loss': losses.MeanSquaredError(),
                'metrics': [metrics.MeanSquaredError()],
            }
        },
        'fit': {
            'batch_size': 32,
            'epochs': 100,
            'callbacks': [
                callbacks.EarlyStopping(monitor='loss', patience=7),
                callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.1,
                                            patience=10)
            ]
        },
        'Monte-Carlo': {
            'temperature': 100,
            'Number_of_steps': 100000,
            'box_size': 2,
        }
    }

    # Load dataset and compute descriptors

    descriptors, energies = data.load_and_compute(
        dataset=params['dataset'],
        soap_params=params['soap'],
        limit=params['dataset_size_limit'])

    #train_size = int(params['train_set_size_ratio'] * np.shape(descriptors)[0])
    #validation_size = int(params['validation_set_size_ratio'] * np.shape(descriptors)[0])

    X_train, y_train, X_validation, y_validation, X_test, y_test = preprocessing.generate_scaled_sets(
        atoms=data.get_atoms_list(params['dataset']),
        desc=descriptors,
        energies=energies,
        ratios=(params['train_set_size_ratio'],
                params['validation_set_size_ratio']),
        pca_params=params['pca'],
        desc_scaler_type=params['scalers']['desc_scaler_type'],
        energies_scaler=params['scalers']['energies_scaler'])

    X_train = convert_to_inputs(X_train)
    X_validation = convert_to_inputs(X_validation)
    X_test = convert_to_inputs(X_test)

    # Create model and train it
    model = NN.create(
        atoms=data.get_atoms_list(params['dataset']),
        desc_length=np.shape(X_train[0])[1],
        comp_params=params['model']['compilation'],
        sub_hidden_layers_params=params['submodel']['hidden_layers'],
        sub_output_layer_params=params['submodel']['output_layer'])

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_validation, y_validation),
                        verbose=0,
                        **params['fit'])

    loss = history.history['val_loss'][-1]

    return {'loss': loss, 'status': STATUS_OK}
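Returning {'loss': ..., 'status': STATUS_OK} follows hyperopt's objective-function convention, so this function can be handed straight to hyperopt's optimizer. A hedged usage sketch (the search space for target is an assumption; the source never shows it):

    from hyperopt import Trials, fmin, hp, tpe

    trials = Trials()
    best = fmin(fn=objective,
                space=hp.uniform('target', 0.0, 1.0),  # hypothetical space
                algo=tpe.suggest,
                max_evals=10,
                trials=trials)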