Пример #1
0
def get_model():
    """Build and compile a small dense classifier.

    Returns:
        A compiled Keras model mapping a single scalar input to a
        4-class probability distribution.
    """
    # Input layer: the shape must be known up front and depends on the data.
    input_layer = layers.Input(shape=(1, ))

    # Hidden layer gives the model the capacity to fit the data; the
    # number of neurons and the layer type are crucial hyperparameters.
    hidden_layer = layers.Dense(units=100, activation='relu')(input_layer)

    # Output layer: one neuron per class.  softmax ensures all
    # activations sum to 1, so the output can be read as a probability
    # distribution -- which is also what categorical_crossentropy below
    # expects.  (BUG FIX: this previously used 'relu', which yields
    # unnormalized, non-probabilistic outputs and breaks the loss.)
    output_layer = layers.Dense(units=4, activation='softmax')(hidden_layer)

    # Actual creation of the model from its input and output layers.
    model = engine.Model(inputs=[input_layer], outputs=[output_layer])

    # Make the model trainable: stochastic gradient descent as the
    # optimizer, cross-entropy as the loss (how big an error the model
    # produces), and accuracy as an extra classification-only metric.
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Пример #2
0
 def initialize_network(self,
                        stack_nb,
                        board_cls=Board,
                        weight_decay=0.0005):
     """Build the unitized-resnet policy network.

     Args:
         stack_nb: number of residual stacks; the resulting depth is
             6 * stack_nb + 2 (reflected in the model name).
         board_cls: class (or subclass) of `Board` whose tensor shape
             defines the network input.
         weight_decay: L2 regularization factor passed to the resnet.

     Returns:
         A Keras model producing a softmax distribution over all
         BOARD_SIZE**2 moves.

     Raises:
         Exception: if `board_cls` is not a class/subclass of `Board`.
     """
     if not isinstance(board_cls, type) or not issubclass(board_cls, Board):
         raise Exception('`board_cls` must be a class/subclass of `Board`')
     # BUG FIX: instantiate the class the caller asked for; this
     # previously hard-coded `Board`, silently ignoring `board_cls`.
     board = board_cls(toTensor=True)
     resnet_inputs, resnet_outputs = get_unitized_resnet(
         board.tensor.shape, stack_nb, weight_decay)
     # Policy head: 2-channel 1x1 convolution, unitize, flatten, softmax.
     tensor = KL.Conv2D(filters=2,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding='same',
                        kernel_initializer=KI.he_normal(),
                        name='last_convolution')(resnet_outputs)
     tensor = Unitization(axis=AXIS, name='last_unitization')(tensor)
     tensor = KL.Activation(activation='relu', name='last_relu')(tensor)
     tensor = KL.Flatten(name='flatten')(tensor)
     outputs = KL.Dense(BOARD_SIZE**2,
                        activation='softmax',
                        name='distribution')(tensor)
     model = KE.Model(resnet_inputs,
                      outputs,
                      name='unitized_resnet_{}_policy'.format(6 * stack_nb +
                                                              2))
     return model
Пример #3
0
Файл: cnn.py Проект: s4ke/FridAI
def get_model():
    """Build and compile a small CNN classifier for 8x8 digit images.

    Returns:
        A compiled Keras model mapping a flattened 8x8 image
        (config.NUM_FEATURES inputs) to a softmax over
        config.NUM_DIGITS classes.
    """
    # NOTE(review): the original version also loaded and one-hot-encoded
    # the sklearn digits data set here, but never used it -- the locals
    # were dead code.  Data loading belongs with training, not with
    # model construction, so it has been removed.

    # Input layer: the shape must be known up front and depends on data.
    input_layer = layers.Input(shape=(config.NUM_FEATURES, ))

    # Data arrives 1D; convert it to its original 2D image form, which
    # has 3 dimensions: the third dimension is the scalar pixel value
    # (color images would carry three values in this dimension instead).
    hidden_layer = layers.Reshape(target_shape=(8, 8, 1))(input_layer)

    # Convolutions are filters, useful for finding features (e.g. edges).
    # 2D convolutions take 3D data (see above) and introduce a new
    # dimension: the filtered data.
    hidden_layer = layers.Conv2D(filters=64, kernel_size=(3, 3))(hidden_layer)
    hidden_layer = layers.Conv2D(filters=32, kernel_size=(1, 1))(hidden_layer)
    hidden_layer = layers.Conv2D(filters=16, kernel_size=(1, 1))(hidden_layer)

    # Convert back to 1D data so it can be mapped to the 1D output.
    hidden_layer = layers.Flatten()(hidden_layer)

    # Output layer: one neuron per class.  softmax ensures all
    # activations sum to 1, so the output can be read as a probability
    # distribution.
    output_layer = layers.Dense(units=config.NUM_DIGITS,
                                activation='softmax')(hidden_layer)

    # Actual creation of the model from its input and output layers.
    model = engine.Model(inputs=[input_layer], outputs=[output_layer])

    # Make the model trainable: stochastic gradient descent as the
    # optimizer, cross-entropy as the loss (how big an error the model
    # produces), and accuracy as an extra classification-only metric.
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Пример #4
0
    def initialize_network(self,
                           stack_nb,
                           board_cls=Board,
                           weight_decay=0.0005):
        """Build the resnet with both a policy head and a value head.

        Args:
            stack_nb: number of residual stacks; the resulting depth is
                6 * stack_nb + 2 (reflected in the model name).
            board_cls: class (or subclass) of `Board` whose tensor shape
                defines the network input.
            weight_decay: L2 regularization factor passed to the resnet.

        Returns:
            A Keras model with two outputs: a softmax distribution over
            all BOARD_SIZE**2 moves and a tanh-activated scalar value.

        Raises:
            Exception: if `board_cls` is not a class/subclass of `Board`.
        """
        if not isinstance(board_cls, type) or not issubclass(board_cls, Board):
            raise Exception('`board_cls` must be a class/subclass of `Board`')
        # BUG FIX: instantiate the class the caller asked for; this
        # previously hard-coded `Board`, silently ignoring `board_cls`.
        board = board_cls(toTensor=True)
        resnet_inputs, resnet_outputs = get_resnet(board.tensor.shape,
                                                   stack_nb, weight_decay)
        # Policy head: 2-channel 1x1 convolution -> BN -> relu ->
        # flatten -> softmax over all moves.
        tensor = KL.Conv2D(filters=2,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           padding='same',
                           kernel_initializer=KI.he_normal(),
                           name='last_policy_convolution')(resnet_outputs)
        tensor = KL.BatchNormalization(axis=AXIS,
                                       name='last_policy_batch_norm')(tensor)
        tensor = KL.Activation(activation='relu',
                               name='last_policy_relu')(tensor)
        tensor = KL.Flatten(name='policy_flatten')(tensor)
        distributions = KL.Dense(BOARD_SIZE**2,
                                 activation='softmax',
                                 name='distribution')(tensor)

        # Value head: 1-channel 1x1 convolution -> BN -> relu ->
        # flatten -> dense -> tanh scalar in [-1, 1].
        tensor = KL.Conv2D(filters=1,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           padding='same',
                           kernel_initializer=KI.he_normal(),
                           name='last_value_convolution')(resnet_outputs)
        tensor = KL.BatchNormalization(axis=AXIS,
                                       name='last_value_batch_norm')(tensor)
        tensor = KL.Activation(activation='relu',
                               name='last_value_relu')(tensor)
        tensor = KL.Flatten(name='value_flatten')(tensor)
        tensor = KL.Dense(256, activation='relu',
                          name='full_connected_layer')(tensor)
        values = KL.Dense(1, activation='tanh', name='value')(tensor)
        return KE.Model(resnet_inputs, [distributions, values],
                        name='resnet_{}_mixture'.format(6 * stack_nb + 2))
Пример #5
0
		
		return x


if __name__ == "__main__":
	# Smoke test: run an image through FFT -> spectral pooling -> IFFT
	# and inspect the intermediate spectra and the filtered result.
	import cv2, sys
	import __main__ as SP
	import fft      as CF
	
	# Build the model.  The 6 input channels are the 3 color channels of
	# the real part followed by 3 channels for the imaginary part (the
	# imaginary half is zero-filled below).
	x = i = KL.Input(shape=(6,512,512))
	f = CF.FFT2()(x)
	p = SP.SpectralPooling2D(gamma=[0.15,0.15])(f)
	o = CF.IFFT2()(p)
	
	# Expose all three intermediate tensors as model outputs so they can
	# be retrieved from a single predict() call.
	model = KE.Model([i], [f,p,o])
	model.compile("sgd", "mse")
	
	# Use it: load the image named on the command line, move channels
	# first (NHWC -> NCHW), and append an all-zero imaginary part.
	img      = cv2.imread(sys.argv[1])
	imgBatch = img[np.newaxis,...].transpose((0,3,1,2))
	imgBatch = np.concatenate([imgBatch, np.zeros_like(imgBatch)], axis=1)
	f,p,o    = model.predict(imgBatch)
	# Magnitude spectra: combine the real ([:, :3]) and imaginary
	# ([:, 3:]) halves per color channel; /255 rescales for display.
	ffted    = np.sqrt(np.sum(f[:,:3]**2 + f[:,3:]**2, axis=1))
	ffted    = ffted .transpose((1,2,0))/255
	pooled   = np.sqrt(np.sum(p[:,:3]**2 + p[:,3:]**2, axis=1))
	pooled   = pooled.transpose((1,2,0))/255
	# Reconstructed image: clip to pixel range, back to NHWC, keep the
	# real (first 3) channels only.
	filtered = np.clip(o,0,255).transpose((0,2,3,1))[0,:,:,:3].astype("uint8")
	
	# Display it.  NOTE(review): no cv2.waitKey() is visible in this
	# chunk -- presumably it follows below.
	cv2.imshow("Original", img)
Пример #6
0
def create_resnet_version_3(**kwargs):
    """Build a bottleneck resnet with a policy head and a value head.

    Keyword Args:
        blocks (int): number of residual bottleneck blocks (default 3).
        kernel_size (tuple): spatial kernel of the middle convolution
            in each block (default (3, 3)).
        filters (int): channel count of the trunk (default 256).
        output_size (int): size of the policy softmax (default SIZE**2).
        weight_decay (float): L2 regularization factor (default 1e-4).

    Returns:
        tuple: ``(model, policy_model, value_model)`` -- the combined
        two-headed model plus per-head models sharing the same layers
        and weights.

    Raises:
        Exception: if an unknown keyword argument is supplied.
    """
    default = {
        'blocks': 3,
        'kernel_size': (3, 3),
        'filters': 256,
        'output_size': SIZE**2,
        'weight_decay': 1e-4
    }

    # Fail fast on typos instead of silently ignoring them.
    unknown = set(kwargs) - set(default)
    if unknown:
        raise Exception('Unknown arguments: ' + ', '.join(sorted(unknown)))

    default.update(kwargs)

    # Settings shared by every convolution in the network.
    conv_setting = {
        'data_format': K.image_data_format(),
        'padding': 'same',
        'activation': 'linear',
        'kernel_initializer': 'he_normal',
        'kernel_regularizer': regularizers.l2(default['weight_decay'])
    }

    # The input shape depends on whether channels come first or last.
    if K.image_data_format() == 'channels_last':
        input_channels = Preprocessor.shape[1]
        input_shape = (SIZE, SIZE, input_channels)
    else:
        input_shape = Preprocessor.shape[1:]

    # (Renamed from `input` to avoid shadowing the builtin.)
    input_tensor = keras_engine.Input(input_shape)

    # Stem: convolution -> batch norm -> relu.
    tensor = keras_layers.Conv2D(filters=default['filters'],
                                 kernel_size=default['kernel_size'],
                                 name='pre_convolution',
                                 **conv_setting)(input_tensor)
    tensor = keras_layers.BatchNormalization(
        axis=CHANNEL_AXIS, name='pre_batch_normalization')(tensor)
    tensor = keras_layers.Activation('relu', name='pre_relu')(tensor)

    def _residual_block(x, block_nb):
        """One bottleneck block: 1x1 reduce -> kxk -> 1x1 expand + skip."""
        t = keras_layers.Conv2D(filters=default['filters'] // 4,
                                kernel_size=(1, 1),
                                name='convolution_{:d}_1'.format(block_nb),
                                **conv_setting)(x)
        t = keras_layers.Conv2D(filters=default['filters'] // 4,
                                kernel_size=default['kernel_size'],
                                name='convolution_{:d}_2'.format(block_nb),
                                **conv_setting)(t)
        t = keras_layers.Conv2D(filters=default['filters'],
                                kernel_size=(1, 1),
                                name='convolution_{:d}_3'.format(block_nb),
                                **conv_setting)(t)
        t = keras_layers.add([x, t], name='add_{:d}'.format(block_nb))
        t = keras_layers.BatchNormalization(
            axis=CHANNEL_AXIS,
            name='batch_normalization_{:d}'.format(block_nb))(t)
        return keras_layers.Activation(
            'relu', name='relu_{:d}'.format(block_nb))(t)

    # Blocks are numbered from 1 to preserve the original layer names
    # (previously produced with a mutable-default counter hack).
    for block_nb in range(1, default['blocks'] + 1):
        tensor = _residual_block(tensor, block_nb)

    # Policy head: 2-channel 1x1 convolution -> BN -> relu -> flatten
    # -> softmax over all moves.
    policy_tensor = keras_layers.Conv2D(filters=2,
                                        kernel_size=(1, 1),
                                        name='policy_convolution',
                                        **conv_setting)(tensor)
    policy_tensor = keras_layers.BatchNormalization(
        axis=CHANNEL_AXIS, name='policy_batch_normalization')(policy_tensor)
    policy_tensor = keras_layers.Activation('relu',
                                            name='policy_relu')(policy_tensor)
    policy_tensor = keras_layers.Flatten(name='policy_flatten')(policy_tensor)
    policy_output = keras_layers.Dense(
        default['output_size'],
        activation='softmax',
        name='p',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(
            default['weight_decay']))(policy_tensor)

    # Value head: global average pooling -> dense -> tanh scalar in
    # [-1, 1].
    value_tensor = keras_layers.GlobalAveragePooling2D(
        data_format=K.image_data_format())(tensor)
    value_tensor = keras_layers.Dense(
        256,
        activation='relu',
        name='value_fc',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(
            default['weight_decay']))(value_tensor)
    value_output = keras_layers.Dense(
        1,
        activation='tanh',
        name='v',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(
            default['weight_decay']))(value_tensor)

    # Three views over the same graph: joint model plus one model per
    # head, all sharing weights.
    model = keras_engine.Model(input_tensor, [policy_output, value_output],
                               name='policy_value_model')

    policy_model = keras_engine.Model(input_tensor,
                                      policy_output,
                                      name='policy_model')

    value_model = keras_engine.Model(input_tensor, value_output,
                                     name='value_model')

    return model, policy_model, value_model
Пример #7
0
def create_resnet_version_2(blocks: int = 3, weight_decay: float = 1e-4, **kwargs):
    """Build a pre-activation-style resnet with policy and value heads.

    The trunk stacks three stages of 16/32/64 filters with `blocks`
    residual blocks each; each stage change downsamples via a stride-2
    1x1 shortcut convolution.  Both output heads read from a shared
    global-average-pooled feature vector.

    Args:
        blocks: number of residual blocks per filter stage.
        weight_decay: L2 regularization factor for all kernels.
        **kwargs: NOTE(review): accepted but silently ignored (unlike
            create_resnet_version_3, which validates unknown arguments).

    Returns:
        tuple: ``(model, policy_model, value_model)`` -- the combined
        two-headed model plus per-head models sharing the same weights.
    """

    # The input shape depends on whether channels come first or last.
    if K.image_data_format() == 'channels_last':
        input_channels = Preprocessor.shape[1]
        input_shape = (SIZE, SIZE, input_channels)
    else:
        input_shape = Preprocessor.shape[1:]
    input = keras_layers.Input(input_shape, name='input')

    # Shared, MUTABLE convolution configuration: the per-layer fields
    # (filters/strides/name/activation) are overwritten in place before
    # each Conv2D construction below, so statement order is significant.
    conv_config = {
        'kernel_size': (3, 3),
        'kernel_regularizer': regularizers.l2(weight_decay),
        'data_format': K.image_data_format(),
        'padding': 'same',
        'kernel_initializer': 'he_normal',
        'use_bias': False,
        'filters': None,
        'strides': None,
        'name': None,
        'activation': None
    }

    # Stem: a single 16-filter convolution with fused relu.
    conv_config['filters'] = 16
    conv_config['strides'] = (1, 1)
    conv_config['name'] = 'pre_convolution'
    conv_config['activation'] = 'relu'
    tensor = keras_layers.Conv2D(**conv_config)(input)

    # Trunk convolutions are linear; relu is applied explicitly after
    # each batch norm instead.
    conv_config['activation'] = 'linear'
    current_filters = 16
    for filters in [16, 32, 64]:
        for block_nb in range(blocks):
            conv_config['filters'] = filters
            if filters == current_filters:
                # Same width: identity shortcut, no downsampling.
                conv_config['strides'] = (1, 1)
                shortcut = tensor
            else:
                # Stage transition: project the shortcut with a
                # stride-2 1x1 convolution; the (2, 2) stride is left
                # in conv_config so the block's first 3x3 convolution
                # downsamples the main path to match.
                conv_config['kernel_size'] = (1, 1)
                conv_config['strides'] = (2, 2)
                conv_config[
                    'name'] = '{:d}_filters_{:d}_downsampling_convolution'.format(
                        filters, block_nb)
                shortcut = keras_layers.Conv2D(**conv_config)(tensor)
                conv_config['kernel_size'] = (3, 3)
                current_filters = filters

            # BN -> relu -> conv ordering (pre-activation style,
            # presumably the "version 2" of the function name).
            tensor = keras_layers.BatchNormalization(axis=CHANNEL_AXIS)(tensor)
            tensor = keras_layers.Activation('relu')(tensor)
            conv_config['name'] = '{:d}_filters_{:d}_1_convolution'.format(
                filters, block_nb)
            tensor = keras_layers.Conv2D(**conv_config)(tensor)

            tensor = keras_layers.BatchNormalization(axis=CHANNEL_AXIS)(tensor)
            tensor = keras_layers.Activation('relu')(tensor)
            # Second convolution of the block never downsamples.
            conv_config['strides'] = (1, 1)
            conv_config['name'] = '{:d}_filters_{:d}_2_convolution'.format(
                filters, block_nb)
            tensor = keras_layers.Conv2D(**conv_config)(tensor)

            tensor = keras_layers.Add()([tensor, shortcut])

    # Final activation after the last residual addition.
    tensor = keras_layers.BatchNormalization(axis=CHANNEL_AXIS)(tensor)
    tensor = keras_layers.Activation('relu')(tensor)

    # Both heads share one global-average-pooled feature vector.
    tensor = keras_layers.GlobalAveragePooling2D(
        data_format=K.image_data_format())(tensor)
    # Policy head: softmax over all SIZE**2 moves.
    policy_output = keras_layers.Dense(
        SIZE**2,
        activation='softmax',
        name='p',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(weight_decay))(tensor)

    # Value head: dense -> tanh scalar in [-1, 1].
    value_tensor = keras_layers.Dense(
        256,
        activation='relu',
        name='value_fc',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(weight_decay))(tensor)
    value_output = keras_layers.Dense(
        1,
        activation='tanh',
        name='v',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(weight_decay))(value_tensor)

    # Earlier convolutional-head variant, kept for reference:
    # conv_config.update({'filters': 2, 'kernel_size': (1, 1), 'name': 'policy_convolution'})
    # policy_tensor = keras_layers.Conv2D(**conv_config)(tensor)
    # policy_tensor = keras_layers.BatchNormalization(
    #     axis=CHANNEL_AXIS, name='policy_batch_normalization'
    # )(policy_tensor)
    # policy_tensor = keras_layers.Activation(
    #     'relu', name='policy_relu'
    # )(policy_tensor)
    # policy_tensor = keras_layers.Flatten(name='policy_flatten')(policy_tensor)
    # policy_output = keras_layers.Dense(
    #     SIZE**2, activation='softmax', name='p',
    #     kernel_initializer='he_normal',
    #     kernel_regularizer=regularizers.l2(weight_decay)
    # )(policy_tensor)
    #
    # conv_config.update({'filters': 1, 'name': 'value_convolution'})
    # value_tensor = keras_layers.Conv2D(**conv_config)(tensor)
    # value_tensor = keras_layers.BatchNormalization(
    #     axis=CHANNEL_AXIS, name='value_batch_normalization'
    # )(value_tensor)
    # value_tensor = keras_layers.Activation(
    #     'relu', name='value_relu'
    # )(value_tensor)
    # value_tensor = keras_layers.Flatten(name='value_flatten')(value_tensor)
    # value_tensor = keras_layers.Dense(
    #     256, activation='relu', name='value_fc',
    #     kernel_initializer='he_normal',
    #     kernel_regularizer=regularizers.l2(weight_decay)
    # )(value_tensor)
    # value_output = keras_layers.Dense(
    #     1, activation='tanh', name='v',
    #     kernel_initializer='he_normal',
    #     kernel_regularizer=regularizers.l2(weight_decay)
    # )(value_tensor)

    # Three views over the same graph: joint model plus one model per
    # head, all sharing weights.
    model = keras_engine.Model(input, [policy_output, value_output],
                               name='policy_value_model')

    policy_model = keras_engine.Model(input,
                                      policy_output,
                                      name='policy_model')

    value_model = keras_engine.Model(input, value_output, name='value_model')

    return model, policy_model, value_model
Пример #8
0
	# X and Y are defined earlier in this script (outside this chunk);
	# presumably a numpy round-trip check -- TODO confirm against the
	# lines above.
	print(np.allclose(X, Y))
	
	
	# Theano round-trip check: ifft(fft(z)) should reproduce its input.
	z   = TT.dmatrix()
	f   = T.function([z], ifft(fft(z)))
	# Stack real and imaginary parts of x along a leading axis of size 2.
	v   = np.concatenate([np.real(x)[np.newaxis,:], np.imag(x)[np.newaxis,:]], axis=0)
	print(v)
	print(f(v))
	print(np.allclose(v, f(v)))
	
	
	# Keras check: run random data through the IFFT2 layer.
	x = i = KL.Input(shape=(128, 32,32))
	x = IFFT2()(x)
	model = KE.Model([i],[x])
	
	loss  = "mse"
	opt   = KO.Adam()
	
	model.compile(opt, loss)
	# NOTE(review): these are private Keras APIs that eagerly build the
	# train/predict/test functions.
	model._make_train_function()
	model._make_predict_function()
	model._make_test_function()
	
	# Batch of 13 random inputs matching the Input shape above.
	v = np.random.normal(size=(13,128,32,32))
	#print v
	V = model.predict(v)
	#print V
	print(V.shape)