import numpy as np
import matplotlib.pyplot as plt

# Input, Dense, Trainer, Sequential, Adam and split_data come from the
# miniature NN framework these examples exercise


def linear_regression(a=1.0, b=0.0):
    # sample 200 evenly spaced points from the line y = a*x + b
    X = np.linspace(-100, 100, 200)
    X = X.reshape((-1, 1))
    [train_x, test_x] = split_data(X, ratio=0.8, random=True)
    train_y = a * train_x + b
    test_y = a * test_x + b

    # build the simplest possible FNN: one input feature into a single dense unit
    i = Input(1)
    x = Dense(1)(i)

    # define trainer
    trainer = Trainer(loss='mse',
                      optimizer=Adam(learning_rate=0.2),
                      batch_size=50,
                      epochs=50)

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)

    # predict
    y_hat = model.predict(test_x)
    plt.plot(test_x, test_y, 'b')
    plt.plot(test_x, y_hat, 'r')
    plt.show()
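
# Illustrative call (parameter values are arbitrary), assuming the framework
# providing Input, Dense, Trainer, Sequential, Adam and split_data is imported:
if __name__ == '__main__':
    linear_regression(a=2.0, b=1.0)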
Example #2
    def get_shapes(self):
        '''Build the drawable shapes for every layer and the funnels
        connecting consecutive layers.'''
        shapes = []
        for i, layer in enumerate(self.layers):
            if 'input' in layer.name:
                shapes = np.append(
                    shapes,
                    Input(name=layer.name,
                          position=layer.position,
                          output_dim=layer.output_dim,
                          depth=layer.depth,
                          flatten=layer.flatten,
                          output_dim_label=layer.output_dim_label).draw())
            elif 'dense' in layer.name:
                shapes = np.append(
                    shapes,
                    Dense(name=layer.name,
                          position=layer.position,
                          output_dim=layer.output_dim,
                          depth=layer.depth,
                          activation=layer.activation,
                          maxpool=layer.maxpool,
                          flatten=layer.flatten,
                          output_dim_label=layer.output_dim_label).draw())
            elif 'conv' in layer.name:
                shapes = np.append(
                    shapes,
                    Convolution(
                        name=layer.name,
                        position=layer.position,
                        output_dim=layer.output_dim,
                        depth=layer.depth,
                        activation=layer.activation,
                        maxpool=layer.maxpool,
                        flatten=layer.flatten,
                        output_dim_label=layer.output_dim_label).draw())
            elif 'output' in layer.name:
                shapes = np.append(
                    shapes,
                    Output(name=layer.name,
                           position=layer.position,
                           output_dim=layer.output_dim,
                           depth=layer.depth,
                           output_dim_label=layer.output_dim_label).draw())
            # every layer after the first is linked to its predecessor by a funnel
            if i:
                shapes = np.append(
                    shapes,
                    Funnel(prev_position=self.layers[i - 1].position,
                           prev_depth=self.layers[i - 1].depth,
                           prev_output_dim=self.layers[i - 1].output_dim,
                           curr_position=layer.position,
                           curr_depth=layer.depth,
                           curr_output_dim=layer.output_dim,
                           color=(178.0 / 255, 178.0 / 255,
                                  178.0 / 255)).draw())
        self.shapes = shapes
Example #3
from time import strftime

from tensorflow.examples.tutorials.mnist import input_data


def main():
    c = color_codes()  # terminal colour escape codes (framework utility)
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # reuse a previously saved model if one exists, otherwise build it fresh
    try:
        net = load_model('/home/mariano/Desktop/test.tf')
    except IOError:
        x = Input([784])
        x_image = Reshape([28, 28, 1])(x)
        x_conv1 = Conv(filters=32,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(x_image)
        h_pool1 = MaxPool((2, 2), padding='same')(x_conv1)
        h_conv2 = Conv(filters=64,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(h_pool1)
        h_pool2 = MaxPool((2, 2), padding='same')(h_conv2)
        h_fc1 = Dense(1024, activation='relu')(h_pool2)
        h_drop = Dropout(0.5)(h_fc1)
        y_conv = Dense(10)(h_drop)

        net = Model(x,
                    y_conv,
                    optimizer='adam',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + c['b'] +
          'Original (MNIST)' + c['nc'] + c['g'] + ' net ' + c['nc'] + c['b'] +
          '(%d parameters)' % net.count_trainable_parameters() + c['nc'])

    net.fit(mnist.train.images,
            mnist.train.labels,
            val_data=mnist.test.images,
            val_labels=mnist.test.labels,
            patience=10,
            epochs=200,
            batch_size=1024)

    save_model(net, '/home/mariano/Desktop/test.tf')
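
# Minimal entry point, assuming the example is run directly as a script:
if __name__ == '__main__':
    main()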
Example #4
def linear_classification(a=1.0, b=0.0, graph=False):

    # prepare data
    x = np.linspace(-100, 100, 200)
    y = a * x + b
    X = np.array(list(zip(x, y))) + np.random.randn(200, 2) * 100
    Y = to_one_hot(np.where(a * X[:, 0] + b > X[:, 1], 1, 0))
    (train_x, train_y), (test_x, test_y) = split_data(X,
                                                      Y,
                                                      ratio=0.8,
                                                      random=True)

    # build simple FNN
    i = Input(2)
    x = Dense(2, activation='softmax')(i)

    # define trainer
    trainer = Trainer(loss='cross_entropy',
                      optimizer=Adam(learning_rate=0.05),
                      batch_size=50,
                      epochs=50,
                      metrics=['accuracy'])

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)
    print(model.evaluate(test_x, test_y))

    if graph:
        plt.plot(model.history['loss'])
        plt.show()

        # predict
        y_hat = model.predict(test_x)
        y_hat = np.argmax(y_hat, axis=1)
        simple_plot(test_x, y_hat, a, b)
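
# Illustrative call with the loss curve and decision plot enabled
# (parameter values are arbitrary):
if __name__ == '__main__':
    linear_classification(a=0.5, b=10.0, graph=True)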
Example #5
def binary_classification():
    def separate_label(data):
        # first two columns are features, the third is the colour label
        X = normalize(data[:, :2].astype('float32'))
        Y = np.where(data[:, 2] == b'black', 0, 1)
        return X, Y

    # prepare train data
    data_dir = "data/examples/binary_classification"
    train_data_path = os.path.join(data_dir, 'training.arff')
    train_data = load_arff(train_data_path)
    train_x, train_y = separate_label(train_data)
    train_y = to_one_hot(train_y)

    # build simple FNN
    i = Input(2)
    x = Dense(30, activation='relu')(i)
    x = Dense(30, activation='relu')(x)
    x = Dense(2, activation='softmax')(x)

    # define trainer
    trainer = Trainer(loss='cross_entropy',
                      optimizer=Adam(clipvalue=1.0),
                      batch_size=256,
                      epochs=500,
                      metrics=['accuracy'])

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)

    plt.plot(range(len(model.history['loss'])), model.history['loss'])
    plt.show()

    # predict
    test_data_path = os.path.join(data_dir, 'test.arff')
    test_data = load_arff(test_data_path)
    test_x, _ = separate_label(test_data)

    y_hat = model.predict(test_x)
    simple_plot(test_x, y_hat)
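
# Illustrative call; assumes training.arff and test.arff exist under
# data/examples/binary_classification:
if __name__ == '__main__':
    binary_classification()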
Example #6
import ast

import dill


def load_model(filepath):
    # deserialize the saved description and rebuild each layer with its weights
    with open(filepath, 'rb') as f:
        model_dict = dill.load(f)
    params = model_dict['params']
    layers = model_dict['layers']
    for name, layer_dict in layers.items():
        W_name = layer_dict.pop('W', None)
        b_name = layer_dict.pop('b', None)
        layer = layer_from_dicts(layer_dict)
        if W_name is not None:
            layer.W = Layer._weight_variable(params[W_name][0], layer.name)
            Model.session.run(layer.W.assign(params[W_name][1]))
        if b_name is not None:
            layer.b = Layer._bias_variable(params[b_name][0], layer.name)
            Model.session.run(layer.b.assign(params[b_name][1]))
        layers.update({name: layer})
    tensor_dict = dict()
    for tensor in model_dict['tensors']:
        tensor_name = tensor[0]
        layer_name = tensor[2]
        if 'Input.T.' in layer_name:
            tensor_dict.update({
                tensor_name:
                Input(ast.literal_eval(layer_name.replace('Input.T.', '')))
            })
        else:
            layer = layers[layer_name]
            input_tensor_name = tensor[1]
            if isinstance(input_tensor_name, list):
                input_tensor = [tensor_dict[i_name] for i_name in input_tensor_name]
            else:
                input_tensor = tensor_dict[input_tensor_name]
            tensor_dict.update({tensor_name: layer(input_tensor)})
    inputs = model_dict['inputs']
    if isinstance(inputs, list):
        inputs = [tensor_dict[i_name] for i_name in inputs]
    else:
        inputs = tensor_dict[inputs]
    outputs = model_dict['outputs']
    if isinstance(outputs, list):
        outputs = [tensor_dict[i_name] for i_name in outputs]
    else:
        outputs = tensor_dict[outputs]
    return Model(inputs, outputs, model_dict['optimizer'], model_dict['loss'],
                 model_dict['metrics'])
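
# Hypothetical round-trip sketch: save_model (used in Example #3) is assumed to
# write the dill structure that load_model reads; the path is illustrative.
if __name__ == '__main__':
    net = load_model('/tmp/test.tf')
    print(net.count_trainable_parameters())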
Example #7
def universal_approximation(f, x):
    [train_x, test_x] = split_data(x, ratio=0.8, random=True)
    train_y = f(train_x)

    test_x = np.sort(test_x, axis=0)
    test_y = f(test_x)

    # build simple FNN
    i = Input(1)
    x = Dense(50, activation='relu')(i)
    x = Dense(1)(x)

    # define trainer
    schedule = ExponentialDecay(initial_learning_rate=0.01, decay_rate=0.75)
    trainer = Trainer(loss='mse',
                      optimizer=Adam(learning_rate=schedule),
                      batch_size=50,
                      epochs=750)

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process (timed)
    start = time.time()
    model.fit(train_x, train_y)
    print('training took %.2fs' % (time.time() - start))

    plt.plot(range(len(model.history['loss'])), model.history['loss'])
    plt.show()

    # predict
    y_hat = model.predict(test_x)
    plt.plot(test_x, test_y, 'b-', label='original')
    plt.plot(test_x, y_hat, 'r-', label='predicted')
    plt.legend()
    plt.show()
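
# Illustrative call approximating sin(x); the input is reshaped to (N, 1)
# to match Input(1):
if __name__ == '__main__':
    universal_approximation(np.sin, np.linspace(-np.pi, np.pi, 1000).reshape(-1, 1))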
Example #8
    else:
        sys.stdout.write('ETA: ' + seconds_to_string(remaining))

    # Output padding
    sys.stdout.write(' ' * 20)
    # Allow progress bar to persist if it's complete
    if current == total:
        sys.stdout.write('\n')
    # Flush to standard out
    sys.stdout.flush()
    # Return the time of the progress update
    return time.time()


# build a small fully connected binary classifier ending in a sigmoid
model = Sequential()
model.add(Input(2))
model.add(Dense(25))
model.add(Activation("relu"))
model.add(Dense(50))
model.add(Activation("relu"))
model.add(Dense(50))
model.add(Activation("relu"))
model.add(Dense(25))
model.add(Activation("relu"))
model.add(Dense(1))
model.add(Activation("sigmoid"))


def initialise_layer_parameters(seed=2):
    # seed NumPy's RNG so parameter initialisation is reproducible
    np.random.seed(seed)
Example #9
    def __init__(self, layers, fold_index, batch_size):
        self.sess = tf.get_default_session()
        self.batch_size = batch_size
        self.fold_index = fold_index

        checkpoint_path = os.path.join(CHECKPOINT_PATH,
                                       'model_{}'.format(self.fold_index))
        mkdirp(checkpoint_path)
        self.checkpoint_dest = os.path.join(checkpoint_path, 'checkpoint')

        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        global_step_op = self.global_step.assign_add(1)

        self.is_training = tf.placeholder(tf.bool, shape=[])

        self.x = tf.placeholder(tf.float32,
                                shape=[None, HEIGHT, WIDTH, NUM_CHANNELS])
        self.y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

        self.layers = [Input(self.x)] + layers

        # thread each layer's output into the next layer
        prev_y = None
        for i, layer in enumerate(self.layers):
            prev_y = layer.apply(prev_y, i, self)
        self.y = prev_y

        with tf.name_scope("loss"):
            # cross-entropy with a small epsilon for numerical stability
            self.loss_op = -tf.reduce_sum(self.y_ * tf.log(self.y + 1e-12))
            tf.scalar_summary("loss", self.loss_op)

            loss_ema = tf.train.ExponentialMovingAverage(
                decay=0.9, num_updates=self.global_step)
            loss_ema_op = loss_ema.apply([self.loss_op])
            tf.scalar_summary('loss_ema', loss_ema.average(self.loss_op))

        with tf.name_scope("test"):
            correct_prediction = tf.equal(tf.argmax(self.y, 1),
                                          tf.argmax(self.y_, 1))

            self.accuracy_op = tf.reduce_mean(
                tf.cast(correct_prediction, "float"))
            tf.scalar_summary('accuracy', self.accuracy_op)

            accuracy_ema = tf.train.ExponentialMovingAverage(
                decay=0.9, num_updates=self.global_step)
            accuracy_ema_op = accuracy_ema.apply([self.accuracy_op])
            tf.scalar_summary('accuracy_ema',
                              accuracy_ema.average(self.accuracy_op))

        with tf.control_dependencies(
            [global_step_op, accuracy_ema_op, loss_ema_op]):
            self.train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(
                self.loss_op, name='train')

        self.summaries_op = tf.merge_all_summaries()

        self.saver = tf.train.Saver(max_to_keep=1)

        self.sess.run(tf.initialize_all_variables())

        summary_run_path = os.path.join(SUMMARY_PATH, str(int(time.time())))
        self.summary_writer = tf.train.SummaryWriter(summary_run_path,
                                                     self.sess.graph_def)

        tf.train.write_graph(self.sess.graph_def,
                             MODEL_PATH,
                             'model.pb',
                             as_text=False)

        latest_checkpoint_path = tf.train.latest_checkpoint(
            self.checkpoint_dest)
        print('Attempting to restore {}...'.format(latest_checkpoint_path))
        if latest_checkpoint_path:
            print('Restoring checkpoint: {}'.format(latest_checkpoint_path))
            self.saver.restore(self.sess, latest_checkpoint_path)
        else:
            print('Could not find checkpoint to restore.')
Example #10
    data.train.crop(config.max_train_examples)

# Model

model = Sequential()

# Separate embedded-words and word-features sequences
embedded = Sequential()
EmbeddingLayer = Embedding if config.learn_embeddings else FixedEmbedding
embedded.add(
    EmbeddingLayer(embedding.shape[0],
                   embedding.shape[1],
                   input_length=data.input_size,
                   weights=[embedding]))
features = Sequential()
features.add(Input(data.feature_shape))

model.add(Merge([embedded, features], mode='concat', concat_axis=2))
model.add(Flatten())

# Fully connected layers
for size in config.hidden_sizes:
    model.add(Dense(size, W_regularizer=l2(config.l2_lambda)))
    model.add(Activation(config.hidden_activation))
model.add(Dense(data.output_size))
model.add(Activation('softmax'))

model.compile(optimizer=optimizer, loss=config.loss)


def predictions(model, inputs):
Example #11
def get_brats_nets(input_shape, filters_list, kernel_size_list, dense_size,
                   nlabels):
    inputs = Input(shape=input_shape)
    conv = inputs
    for filters, kernel_size in zip(filters_list, kernel_size_list):
        conv = Conv(filters,
                    kernel_size=(kernel_size, ) * 3,
                    activation='relu',
                    data_format='channels_first')(conv)

    full = Conv(dense_size,
                kernel_size=(1, 1, 1),
                data_format='channels_first',
                name='fc_dense',
                activation='relu')(conv)
    full_roi = Conv(nlabels[0],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_roi')(full)
    full_sub = Conv(nlabels[1],
                    kernel_size=(1, 1, 1),
                    data_format='channels_first',
                    name='fc_sub')(full)

    rf_roi = Concatenate(axis=1)([conv, full_roi])
    rf_sub = Concatenate(axis=1)([conv, full_sub])

    rf_num = 1
    # keep convolving until the spatial dimensions collapse to 1x1x1
    while np.prod(rf_roi.shape[2:]) > 1:
        rf_roi = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_roi%d' % rf_num)(rf_roi)
        rf_sub = Conv(dense_size,
                      kernel_size=(3, 3, 3),
                      data_format='channels_first',
                      name='rf_sub%d' % rf_num)(rf_sub)
        rf_num += 1

    full_roi = Reshape((nlabels[0], -1))(full_roi)
    full_sub = Reshape((nlabels[1], -1))(full_sub)
    full_roi = Permute((2, 1))(full_roi)
    full_sub = Permute((2, 1))(full_sub)
    full_roi_out = Activation('softmax', name='fc_roi_out')(full_roi)
    full_sub_out = Activation('softmax', name='fc_sub_out')(full_sub)

    combo_roi = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_roi)])
    combo_sub = Concatenate(axis=1)([Flatten()(conv), Flatten()(rf_sub)])

    tumor_roi = Dense(nlabels[0], activation='softmax',
                      name='tumor_roi')(combo_roi)
    tumor_sub = Dense(nlabels[1], activation='softmax',
                      name='tumor_sub')(combo_sub)

    outputs_roi = [tumor_roi, full_roi_out]

    net_roi = Model(inputs=inputs,
                    outputs=outputs_roi,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    outputs_sub = [tumor_sub, full_sub_out]

    net_sub = Model(inputs=inputs,
                    outputs=outputs_sub,
                    optimizer='adadelta',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    return net_roi, net_sub
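
# Hypothetical call; the patch shape and label counts are illustrative
# (4-channel 3D patches, 2 ROI labels, 5 tumour sub-labels):
if __name__ == '__main__':
    net_roi, net_sub = get_brats_nets(input_shape=(4, 17, 17, 17),
                                      filters_list=[32, 64],
                                      kernel_size_list=[3, 3],
                                      dense_size=256,
                                      nlabels=(2, 5))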
Example #12
    # interactive model-editing menu; menu choices are cast to int since
    # input() returns a string
    os.system('clear')
    print(
        '\n\nWhat would you like to do with your model?\n\n1. Add a layer\n2. Remove a layer\n3. Display the model\n4. Save Model\n5. Quit'
    )
    in3 = int(input('> '))
    if in3 == 1:
        print(
            '\nWhat kind of layer would you like to add?\n1. Input Layer\n2. Fully Connected Layer\n3. Convolutional Layer\n4. Nevermind'
        )
        in4 = int(input('> '))
        if in4 == 1:
            name = input('What would you like to name this layer? > ')
            hm_nodes = input(
                'This kind of layer should have the same number of nodes as the data. How many nodes should be in this layer? > '
            )
            layer = Input(name, hm_nodes)
            model.add_layer(layer)
        if in4 == 2:
            name = input('What would you like to name this layer? > ')
            hm_nodes = input('How many nodes should be in this layer? > ')
            layer = Fully_Connected(name, hm_nodes)
            model.add_layer(layer)
        if in4 == 3:
            name = input('What would you like to name this layer? > ')
            hm_nodes = input('How many nodes should be in this layer? > ')
            layer = Convolutional(name, hm_nodes)
            model.add_layer(layer)
    if in3 == 2:
        print(
            '\nWhat layer index would you like to remove? (Use \'999\' to not remove a layer)'
        )
Example #13
def run_mlp(filepath, dataset, model_name_log, dataset_name_log):
    # config = settings.from_cli(['datadir', 'wordvecs'], Defaults)
    config = Defaults()
    config.datadir = dataset
    config.wordvecs = filepath
    config.results_log = defaultdict(defaultdict)
    config.model_name_log = model_name_log
    config.dataset_name_log = dataset_name_log
    optimizer = optimizers.get(config.optimizer)
    output_name = 'mlp--' + path.basename(config.datadir.rstrip('/'))
    #common.setup_logging(output_name)
    #settings.log_with(config, info)
    print('Processing model: {} on dataset: {}'.format(
        model_name_log, dataset_name_log))

    # Data
    data = input_data.read_data_sets(config.datadir, config.wordvecs, config)
    embedding = common.word_to_vector_to_matrix(config.word_to_vector)

    if config.max_train_examples and len(data.train) > config.max_train_examples:
        warn('cropping train data from %d to %d' % (len(data.train),
                                                    config.max_train_examples))
        data.train.crop(config.max_train_examples)

    # Model
    model = Sequential()

    # Separate embedded-words and word-features sequences
    embedded = Sequential()
    embedded.add(FixedEmbedding(embedding.shape[0], embedding.shape[1],
                                input_length=data.input_size, weights=[embedding]))
    features = Sequential()
    features.add(Input(data.feature_shape))

    model.add(Merge([embedded, features], mode='concat', concat_axis=2))
    model.add(Flatten())

    # Fully connected layers
    for size in config.hidden_sizes:
        model.add(Dense(size))
        model.add(Activation(config.hidden_activation))
    model.add(Dense(data.output_size))
    model.add(Activation('softmax'))

    model.compile(optimizer=optimizer, loss=config.loss)

    def predictions(model, inputs):
        output = list(model.predict(inputs, batch_size=config.batch_size))
        return np.argmax(np.asarray(output), axis=1)

    def eval_report(prefix, model, dataset, config, log=info):
        pred = predictions(model, dataset.inputs)
        gold = np.argmax(dataset.labels, axis=1)
        summary = common.performance_summary(dataset.words, gold, pred, config)
        # for s in summary.split('\n'):
        #     log(prefix + ' ' + s)

    # small_train = data.train.subsample(config.max_develtest_examples)
    # small_devel = data.devel.subsample(config.max_develtest_examples)

    # train one epoch at a time so the data can be reshuffled between epochs
    for epoch in range(1, config.epochs + 1):
        model.fit(data.train.inputs, data.train.labels,
                  batch_size=config.batch_size, nb_epoch=1,
                  verbose=config.verbosity)
        # eval_report('Ep %d train' % epoch, model, small_train, config)
        # eval_report('Ep %d devel' % epoch, model, small_devel, config)
        data.train.shuffle()

    # eval_report('FINAL train', model, data.train, config)
    # eval_report('FINAL devel', model, data.devel, config)

    # pred = predictions(model, data.devel.inputs)
    # common.save_gold_and_prediction(data.devel, pred, config, output_name)

    if config.test:
        eval_report('TEST', model, data.test, config)
        pred = predictions(model, data.test.inputs)
        common.save_gold_and_prediction(data.test, pred, config,
                                        'TEST--' + output_name)
    return config.results_log
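
# Illustrative invocation; the paths and log names are placeholders:
if __name__ == '__main__':
    results = run_mlp('wordvecs.txt', 'data/dataset', 'mlp', 'example-dataset')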