def _build_net():
    model = keras.Sequential()
    #     model.add(keras.Input(shape=(3,224,224)))

    # group 1 224>112
    model.add(
        keras.layers.Conv2D(filters=64,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv1_1',
                            input_shape=(3, 224, 224)))
    model.add(keras.layers.ReLU(name='relu1_1'))

    model.add(
        keras.layers.Conv2D(filters=64,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv1_2'))
    model.add(keras.layers.ReLU(name='relu1_2'))

    model.add(
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  name='pool1',
                                  data_format='channels_first'))

    # group 2 112>56
    model.add(
        keras.layers.Conv2D(filters=128,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv2_1'))
    model.add(keras.layers.ReLU(name='relu2_1'))

    model.add(
        keras.layers.Conv2D(filters=128,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv2_2'))
    model.add(keras.layers.ReLU(name='relu2_2'))

    model.add(
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  name='pool2',
                                  data_format='channels_first'))

    # group 3 56>28
    model.add(
        keras.layers.Conv2D(filters=256,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv3_1'))
    model.add(keras.layers.ReLU(name='relu3_1'))

    model.add(
        keras.layers.Conv2D(filters=256,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv3_2'))
    model.add(keras.layers.ReLU(name='relu3_2'))

    model.add(
        keras.layers.Conv2D(filters=256,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv3_3'))
    model.add(keras.layers.ReLU(name='relu3_3'))

    model.add(
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  name='pool3',
                                  data_format='channels_first'))

    # group 4 28>14
    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv4_1'))
    model.add(keras.layers.ReLU(name='relu4_1'))

    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv4_2'))
    model.add(keras.layers.ReLU(name='relu4_2'))

    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv4_3'))
    model.add(keras.layers.ReLU(name='relu4_3'))

    model.add(
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  name='pool4',
                                  data_format='channels_first'))

    # group 5 14>7
    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv5_1'))

    model.add(keras.layers.ReLU(name='relu5_1'))

    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv5_2'))
    model.add(keras.layers.ReLU(name='relu5_2'))

    model.add(
        keras.layers.Conv2D(filters=512,
                            kernel_size=[3, 3],
                            strides=(1, 1),
                            padding="same",
                            data_format='channels_first',
                            name='conv5_3'))
    model.add(keras.layers.ReLU(name='relu5_3'))
    model.add(
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  name='pool5',
                                  data_format='channels_first'))

    # fc6
    model.add(keras.layers.Flatten(data_format='channels_last', name='f6'))
    # Here I assume that both Keras and Caffe flatten the tensor in row-major order
    # (see https://en.wikipedia.org/wiki/Row-_and_column-major_order).
    #
    # In other words, using the indices of the last axis as the indicator, the flattened sequence is
    #
    # 0,1,2,...,n,0,1,2,...,n
    #
    # NOT
    #
    # 0,0,0,...,0,1,1,1,...,n
    #
    # A test run of this model seems to confirm this assumption.
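    #
    # A quick way to sanity-check this outside the model (a sketch, not part of the
    # original code): numpy flattens in the same row-major order,
    #
    #   >>> import numpy as np
    #   >>> np.arange(12).reshape(2, 2, 3).flatten()
    #   array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
    #
    # i.e. the last-axis index cycles 0,1,2,0,1,2,... exactly as described above.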

    model.add(keras.layers.Dense(4096, name='fc6'))
    model.add(keras.layers.ReLU(name='relu6'))
    model.add(keras.layers.Dropout(0.5, name='drop6'))

    model.add(keras.layers.Dense(4096, name='fc7'))
    model.add(keras.layers.ReLU(name='relu7'))
    model.add(keras.layers.Dropout(0.5, name='drop7'))

    model.add(keras.layers.Dense(365, name='fc8a'))
    model.add(keras.layers.Softmax(axis=-1, name='prob'))

    return model
Example No. 2
from __future__ import print_function
import keras
import tensorflow as tf
import numpy as np
import json
tmpx = json.load(open('x1.json', 'r'))
tmpy = json.load(open('y1.json', 'r'))
x = np.array(tmpx)
y = np.array(tmpy)
model = keras.Sequential([
    keras.layers.Dense(8000, input_shape=(5180, )),
    keras.layers.Dense(7000),
    keras.layers.Dense(5180, activation='softmax')
])

model.compile(loss="binary_crossentropy",
              optimizer="Adam",
              metrics=['accuracy'])
model.fit(x, y, batch_size=64, epochs=5)
jsonstr = model.to_json()
open("model2.json", 'w').write(jsonstr)
model.save_weights('weight2.h5')
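
# A minimal sketch (not part of the original snippet) showing how the architecture and
# weights saved above could be restored later with the standard Keras JSON/HDF5 API:
from keras.models import model_from_json

restored = model_from_json(open("model2.json", 'r').read())
restored.load_weights('weight2.h5')
restored.compile(loss="binary_crossentropy", optimizer="Adam", metrics=['accuracy'])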
Example No. 3
        nb_frames=10,
        batch_size=4,
        input_shape=(224, 224),
        data_augmentation=image_aug)

    val_generator = MotionFlowDataGenerator(
        path=r'/home/coala/mestrado/datasets/UCF101/',
        split='val',
        nb_frames=10,
        batch_size=4,
        input_shape=(224, 224)
        # data_augmentation=image_aug
    )

    # test model
    model = keras.Sequential()
    model.add(
        keras.applications.VGG16(include_top=False,
                                 input_shape=(224, 224, 20),
                                 weights=None))
    td = keras.layers.TimeDistributed(input_shape=(5, 224, 224, 20),
                                      layer=model)
    model = keras.Sequential()
    model.add(td)
    # model.add(keras.layers.Conv3D(filters=256, kernel_size=(2, 2, 2), strides=2))
    model.add(keras.layers.MaxPool3D())
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dropout(.5))
    model.add(keras.layers.Dense(4096, activation='relu', name='fc1'))
    model.add(keras.layers.Dropout(.5))
    model.add(keras.layers.Dense(4096, activation='relu', name='fc2'))
Example No. 4
#plt.imshow(train_images[1])
#plt.colorbar()
#plt.grid(False)
#plt.show()

#Data preprocessing - this squashes all values between 0 and 1; we want the inputs as small as possible so the neural network trains faster
train_images = train_images / 255.0

test_images = test_images / 255.0  #if you forget to preprocess the test images the model won't fit them properly, so do both

model = keras.Sequential(
    [  #Sequential is the basic model type: info passes from left to right sequentially
        keras.layers.Flatten(
            input_shape=(28, 28)
        ),  # input layer (1), flatten lets us take the 28 by 28 image and flatten it into a 1D vector of pixels
        keras.layers.Dense(
            128, activation='relu'
        ),  # hidden layer (2), dense means every neuron in the previous layer is connected to all in this one; relu - rectified linear unit (one of many activation functions)
        keras.layers.Dense(
            10, activation='softmax'
        )  # output layer (3), 10 because there are only 10 classes; softmax makes sure all neuron values are between 0 and 1 and sum to 1
    ])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(
    train_images, train_labels, epochs=2
)  # we pass the data, labels and epochs and watch the magic! Fewer epochs is often better, to avoid overfitting to the training data

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=1)
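
# A small follow-up sketch (not in the original code): read class predictions from the
# softmax output with argmax.
import numpy as np

predictions = model.predict(test_images)
print(np.argmax(predictions[0]))  # index of the most likely class for the first test image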
Example No. 5
        sp = f.add_subplot(rows, cols, i+1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=16)
        plt.imshow(ims[i], interpolation=None if interp else 'none')


#it grabs a total of 2 images since the batch size is 2; some of them are cats, others are dogs
images , lables = next(train_batches)
plots(ims = images, titles = lables)

#noOfBatches = totalNumberOfSamples / batchSize
#number of batches of samples to yield from the generator before declaring one epoch finished

model = k.Sequential([
    k.layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu", input_shape=(224, 224, 3)),
    k.layers.Flatten(),
    k.layers.Dense(2, activation="softmax")
])

    
model.compile(k.optimizers.Adam(lr=0.0001), loss="categorical_crossentropy", metrics=["accuracy"])

model.fit_generator(train_batches,steps_per_epoch=12/2,validation_data=valid_batches,
                    validation_steps=4/1,epochs=5,verbose=2)

images = []
lables = []
for i in np.arange(4):
    test_images , test_lables = next(test_batches)
    plots(ims= test_images ,titles=test_lables)    
    images.append(test_images)
    lables.append(test_lables)
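
# A possible follow-up (an assumption, not shown in the original): stack the collected
# test batches and run the trained model over them.
import numpy as np

test_x = np.vstack(images)
predictions = model.predict(test_x)
print(predictions[:5])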
import tensorflow as tf
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
x = [2, 3, 7, 8, 9, 10]
y = [11, 12, 13, 14, 15, 16]

print(x)
print(y)
modelr = keras.Sequential([
    keras.layers.Dense(80, activation=tf.nn.relu, input_shape=[1]),
    keras.layers.Dense(80, activation=tf.nn.relu),
    keras.layers.Dense(80, activation=tf.nn.relu),
    keras.layers.Dense(80, activation=tf.nn.relu),
    keras.layers.Dense(80, activation=tf.nn.relu),
    keras.layers.Dense(1)
])

modelr.compile(loss='mean_squared_error',
               optimizer='adam',
               metrics=['mean_absolute_error', 'mean_squared_error'])

modelr.fit(x, y, epochs=9)
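
# A quick follow-up sketch (not in the original): query the fitted regression model with
# an unseen input value; input_shape=[1] expects a 2D array of shape (n_samples, 1).
import numpy as np

print(modelr.predict(np.array([[20.0]])))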
    def __init__(self, full_dataset, batch_size=1024, latent_dim=5, num_pattern=200, num_labels=10):
        """
        Objects which are shared across many agents
        (e.g. vae, ...)
        """
        self.handle = tf.placeholder(tf.string, shape=[])
        self.batch_size = batch_size
        self.latent_dim = latent_dim
        self.num_pattern = num_pattern
        self.num_labels = num_labels
        self.iterator = tf.data.Iterator.from_string_handle(self.handle, full_dataset.batch(self.batch_size).output_types,
                                                       full_dataset.batch(self.batch_size).output_shapes)
        x_train_ph, z_mean_ph, z_log_var_ph, y_train_ph = self.iterator.get_next()

        # x_train_ph = tf.placeholder(tf.float32, shape=[None, x_train.shape[1]])
        # y_train_ph = tf.placeholder(tf.float32, shape=[None,])
        # z_mean_ph = tf.placeholder(tf.float32, shape=[None, z_train.shape[1]])
        # z_log_var_ph = tf.placeholder(tf.float32, shape=[None, z_log_var_train.shape[1]])
        z_cov = tf.matrix_diag(tf.exp(z_log_var_ph + 1e-10))

        # Train the latent classifier ==================================================================================
        print('Training latent classifier...')
        self.latent_clf = keras.Sequential([
            keras.layers.InputLayer(input_tensor=z_mean_ph, input_shape=(self.latent_dim,)),
            keras.layers.Dense(128, activation=tf.nn.relu),
            keras.layers.Dense(10, activation=tf.nn.softmax)
        ])
        latent_loss = K.sparse_categorical_crossentropy(y_train_ph, self.latent_clf.output, from_logits=True)
        self.latent_train_step = tf.train.AdamOptimizer().minimize(latent_loss)

        # Train the specialized classifiers ================================================================================
        print('Training specialized classifiers...')
        self.scale_to_unconstrained = tfb.Chain([
            # step 3: flatten the lower triangular portion of the matrix
            tfb.Invert(tfb.FillTriangular(validate_args=True)),
            # step 2: take the log of the diagonals
            tfb.TransformDiagonal(tfb.Invert(tfb.Exp(validate_args=True))),
            # # step 1: decompose the precision matrix into its Cholesky factors
            # tfb.Invert(tfb.CholeskyOuterProduct(validate_args=True)),
        ])

        # random_init = False
        # if random_init:
        #     means = tf.Variable(initial_value=tf.random_uniform(means_.shape), trainable=True, dtype=tf.float32)
        #     scales_unconstrained = tf.Variable(
        #         initial_value=scale_to_unconstrained.forward(np.linalg.cholesky(covariances_)),
        #         trainable=True, dtype=tf.float32)
        #     scales_unconstrained = tf.Variable(initial_value=tf.random_uniform(scales_unconstrained.shape),
        #                                        trainable=True,
        #                                        dtype=tf.float32)
        #     scales = scale_to_unconstrained.inverse(scales_unconstrained)
        # else:
        #     means = tf.Variable(initial_value=means_, trainable=True, dtype=tf.float32)
        #     scales_unconstrained = tf.Variable(
        #         initial_value=scale_to_unconstrained.forward(np.linalg.cholesky(covariances_)),
        #         trainable=True, dtype=tf.float32)
        #     scales = scale_to_unconstrained.inverse(scales_unconstrained)

        self.means = tf.get_variable(name='gmm_means', shape=(self.num_pattern, self.latent_dim), trainable=True, dtype=tf.float32)
        self.scales_unconstrained = tf.get_variable(
            name='gmm_scale',
            shape=(self.num_pattern, (self.latent_dim*self.latent_dim + self.latent_dim) / 2),
            trainable=True,
            dtype=tf.float32
        )
        scales = self.scale_to_unconstrained.inverse(self.scales_unconstrained)

        covariances = tf.matmul(scales, tf.linalg.transpose(scales))
        p = tfp.distributions.MultivariateNormalTriL(
            loc=self.means,
            scale_tril=scales + tf.eye(self.latent_dim, self.latent_dim, batch_shape=(self.num_pattern,)) * 1e-5,
            validate_args=True
        )
        S_label_pattern = tfp.monte_carlo.expectation(
            f=lambda x: self.latent_clf(x),
            samples=p.sample(1),
            log_prob=p.log_prob,
            use_reparametrization=(p.reparameterization_type == tfp.distributions.FULLY_REPARAMETERIZED)
        )

        coeffs = Bhattacharyya_coeff(z_mean_ph, z_cov, self.means, covariances)
        coeffs = tf.reshape(coeffs, [tf.shape(x_train_ph)[0], self.num_pattern, 1])
        coeffs_sum = tf.reduce_sum(coeffs, axis=[1, 2])
        coeffs = tf.tile(coeffs, [1, 1, self.num_labels])
        # S_label_pattern = tf.reshape(S_label_pattern, [1, self.num_pattern, self.num_labels])
        # S_label_pattern = tf.tile(S_label_pattern, [tf.shape(x_train_ph)[0], 1, 1])

        S_label_x = coeffs * S_label_pattern + (1 - coeffs) * (1 / self.num_labels)

        # Combine specialized classifiers to obtained recomposed model =====================================================
        coeffs_sum = tf.reshape(coeffs_sum, [tf.shape(x_train_ph)[0], 1, 1])
        L_label_x = (coeffs / coeffs_sum) * S_label_x
        L_label_x = tf.reduce_sum(L_label_x, axis=1)

        # Construct loss function and optimizer ============================================================================
        # true_pred_ph = tf.placeholder(tf.float32, shape=[None, true_pred.shape[1]])
        # loss = tf.reduce_mean(
        #     tf.reduce_mean(tf.square(tf.log(tf.clip_by_value(L_label_x, 1e-10, 1.0)) - true_pred_ph), axis=1))
        # loss = tf.debugging.check_numerics(
        #     loss,
        #     'loss'
        # )

        # optimizer = tf.train.AdamOptimizer()
        # # opt = optimizer.minimize(loss, var_list=[scales_unconstrained, means])
        # grads_and_vars = optimizer.compute_gradients(loss, var_list=[self.scales_unconstrained, self.means])
        # # clipped_grads_and_vars = [(tf.clip_by_norm(g, 1), v) for g, v in grads_and_vars if g is not None]
        # grads_and_vars = [(tf.debugging.check_numerics(g, 'gradient'), v) for g, v in grads_and_vars]
        # opt = optimizer.apply_gradients(grads_and_vars)

        self.S_label_pattern = S_label_pattern
black_box_model = keras.Sequential([
    Conv2D(32,
           kernel_size=(3, 3),
           activation='relu',
           kernel_initializer='he_normal',
           input_shape=input_shape),
    Conv2D(32,
           kernel_size=(3, 3),
           activation='relu',
           kernel_initializer='he_normal'),
    MaxPool2D((2, 2)),
    Dropout(0.20),
    Conv2D(64, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
    Conv2D(64, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(128, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.25),
    Dense(num_classes, activation='softmax'),
])
Example No. 9
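# The grid search below monitors a custom R_squared metric that is not included in this
# snippet. A minimal Keras-backend sketch of such a metric (an assumption, not the
# original implementation):
from keras import backend as K


def R_squared(y_true, y_pred):
    # coefficient of determination: 1 - SS_res / SS_tot
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())
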
def model_initial_search(data, test_fraction, random_state, layers,
                         cumulative_time, params):
    X, Y, input_dim, output_dim = dp.data_info(data)
    proj_name = data["proj_name"]
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=test_fraction, random_state=random_state)
    activation_functions = params["activation_functions"]
    units = params["units"]
    iterations = (len(units) * len(activation_functions))**(
        layers + 1) * len(activation_functions)
    inner_iterations = (len(units) * len(activation_functions))**layers
    options = make_combo(option1=activation_functions, option2=units)
    af_combs = make_pairwise_list(max_depth=layers, options=options)
    print(f'{layers}\t{options}\t{iterations} iterations required')
    best_R = 0.0
    best_param = []
    iteration_n = 1
    for n in range(layers):
        best_param.append(['none', 'none'])
    for inner_iteration in range(inner_iterations):
        for option_in in options:
            inner_list = []
            for k in range(layers):
                inner_list.append(af_combs[inner_iteration][k])
            for activation_out in activation_functions:
                print(inner_list)
                print(f"running iteration {iteration_n}")
                parameter_list = []
                parameter_list.append(option_in)
                parameter_list.extend(inner_list)
                parameter_list.append(activation_out)
                print(
                    f"create input layer with activation of {option_in[0]} and units of {option_in[1]}"
                )
                model = keras.Sequential()
                model.add(
                    keras.layers.Dense(option_in[1],
                                       input_dim=input_dim,
                                       activation=option_in[0]))
                for i in range(len(inner_list)):
                    print(
                        f"create hidden layer {i+1} of activation {inner_list[i][0]} and units {inner_list[i][1]}"
                    )
                    model.add(keras.layers.BatchNormalization(momentum=0.9))
                    model.add(
                        keras.layers.Dense(inner_list[i][1],
                                           activation=inner_list[i][0]))
                print(
                    f"create output layer with activation of {activation_out} and units of {output_dim}"
                )
                model.add(
                    keras.layers.Dense(output_dim, activation=activation_out))
                model.compile(loss='mean_squared_error',
                              optimizer='adam',
                              metrics=[R_squared])
                earlystop = keras.callbacks.EarlyStopping(
                    monitor='val_R_squared',
                    min_delta=0.0001,
                    patience=20,
                    mode='auto')
                collection_folder = './Results/%s_collection%d' % (proj_name,
                                                                   layers)
                if not os.path.exists(collection_folder):
                    os.makedirs(collection_folder)
                output_folder = './Results/%s_collection%d/intermediate_output%d' % (
                    proj_name, layers, iteration_n)
                if not os.path.exists(output_folder):
                    os.makedirs(output_folder)
                filepath = output_folder + "/weights-{epoch:02d}-{val_R_squared:.2f}.hdf5"
                checkpoint = keras.callbacks.ModelCheckpoint(
                    filepath,
                    monitor='val_R_squared',
                    verbose=1,
                    save_best_only=False,
                    save_weights_only=True,
                    mode='auto',
                    period=10)
                callbacks_list = [earlystop, checkpoint]
                start = time.time()
                history = model.fit(X_train,
                                    Y_train,
                                    epochs=300,
                                    batch_size=10,
                                    callbacks=callbacks_list,
                                    validation_split=0.2,
                                    verbose=0)
                end = time.time()
                cumulative_time += (end - start)
                print('it already took %0.2f seconds' % (cumulative_time))
                scores = model.evaluate(X_test, Y_test, verbose=0)
                if not os.path.exists("./Results/results%d.txt" % (layers)):
                    f = open("./Results/results%d.txt" % (layers), "w+")
                else:
                    f = open("./Results/results%d.txt" % (layers), "a+")
                f.write("For this combination %s, R is %0.2f\r\n" %
                        (parameter_list, scores[1]))
                if scores[1] > best_R:
                    best_param = parameter_list
                    best_R = scores[1]
                else:
                    pass
                f.write("The best_R for now is %0.4f and combination is %s " %
                        (best_R, best_param))
                iteration_n += 1
                x = {
                    "layer_number": layers,
                    "starting_n": iteration_n - 1,
                    "best_R": best_R,
                    "best_param": best_param,
                    "cumulative_time": cumulative_time
                }
                print(x)
                pr.check_write(x, 'latest.json')
                print("")
        print("")
    f.close()
    print(best_param)
    print(best_R)
    print('Training process has been finished')
    print('model took %0.2f seconds to train' % (cumulative_time))
    return best_param, best_R
Example No. 10
def main():

	global errors

	# print opening header
	print()
	tuna.print_tuna()
	print( "\nstocktuna by Justin Bodnar\n" )
	print( "Can we teach computers to speculate?\n" )

	# main program infinite loop
	choice = 420
	while int(choice) > 0:
		choice = 0

		# main menu text
		print( "Menu" )
		print( "0. EXIT" )
		print( "1. Create new data sets" )
		print( "2. Extend data set" )
		print( "3. List and analyze available data sets" )
		print( "4. Train a model on a data set" )
		print( "5. View a random data point and tag" )
		print( "6. Graph a random data point and tag (uses MatPlotLib)" )
		print( "7. Watch a model make 10,000 predictions" )

		# get user choice
		choice = int(input( "\nEnter choice: "))

		# EXIT
		if choice == 0:
			print( "\nEXITING\n" )
			exit()

		# choice == 1
		# create new data set
		elif choice == 1:

			# get user parameters
			level = int(input("Enter data level: "))
			size = int(input("Enter size of dataset: "))
			n = int(input("Enter the number of days to look at: "))
			d = int(input("Enter number of days invested: "))
			filename = str(level)+"-"+str(size)+"-"+str(n)+"-"+str(d)

			# create data set
			data, tags = create_data_set(level, size, n, d)

			# output
			print( "Dataset saved as ./datasets/"+ filename+"_tags and ./datasets/"+ filename+"_data" ) 
			print( "Filename: level-size-n-d_[data|tags]" )
			# wait for user  input
			pause = input( "Press enter to continue" )

		# choice == 2
		# extend a data set
		elif choice == 2:

			# try-catch block
			try:
				# get user input
				dataset_filename, level, size, n, d = cli_choose_dataset()

				# check for 0 datasets
				if level < 0:
					print( "NO DATASETS AVAILABLE. BUILD ONE TO CONTINUE." )
					continue

				# get user input
				number_of_data_to_add = int(input("Enter number of data points to add: "))
				size_of_new_dataset = number_of_data_to_add + size

				# unpickle lists
				data = pickle.load( open( dataset_filename + "_data", "rb" ) )
				tags = pickle.load( open( dataset_filename + "_tags", "rb" ) )

				# get new list
				newData, newTags = create_data_set(int(level), number_of_data_to_add, int(n), int(d))

				# append lists
				data += newData
				tags += newTags

				# make new filename
				new_filename = str(level) + "-" + str(size_of_new_dataset) + "-" + str(n) + "-" + str(d)

				# repickle list
				pickle.dump( data, open( "./datasets/"+new_filename+"_data", "wb" ) )
				pickle.dump( tags, open( "./datasets/"+new_filename+"_tags", "wb" ) )

			# print errors
			except Exception as e:
				if errors:
					print( e )
					print(tuna.print_exception())
				pass

		# choice == 3
		# analyze available data sets
		elif choice == 3:

			# get user input
			dataset_filename, level, size, n, d = cli_choose_dataset()

			# check for 0 datasets
			if level < 0:
				print( "NO DATASETS AVAILABLE. BUILD ONE TO CONTINUE." )
				continue

			# try to unpickle dataset file
			try:
				# unpickle
				data_set = pickle.load( open( dataset_filename+"_data", "rb" ) )

				# get length of dim 2
				min = 99999999
				max = -1

				# loop through dim 1, checking each entry along dim 2 for size
				for data_point in data_set:

					# check for min or max
					if len(data_point) > max:
						max = len(data_point)
					if len(data_point) < min:
						min = len(data_point)
				# print output
				print( "\nName: ", dataset_filename )
				print( "Dim 1:", len(data_set), "(size)")
				if min == max:
					print( "Dim 2:", min, "(n)" )
				else:
					print( "Data set irregular with bounds (", min, ",", max, ")" )
					print( "Fixing with lower bound", min, "as new dim2 size" )

					# loop through dim 1, creating new dataset of proper dim 2 size
					regularized_data_set = []
					for data_point in data_set:
						regularized_data_set.append( data_point[-min:] )

					# replace the old dataset with the regularized one
					data_set = regularized_data_set

					# get new stats
					min = 999999
					max = -1
					# for each data_point
					for data_point in data_set:
						# check for new min or max
						if len(data_point) < min:
							min = len(data_point)
						if len(data_point) > max:
							max = len(data_point)

					# print new dataset stats
					if min == max:
						print( "New dim 2:", min )
						print( "Repickling. Please rerun this function to confirm updates" )
						pickle.dump( data_set, open( dataset_filename+"_data", "wb" ) )
					else:
						print( "Data set STILL irregular with bounds (", min, ",", max, ")" )

			# print errors
			except Exception as e:
				if errors:
					print( e )
					print(print_exception())
				pass

			print()

			# wait for user to press enter
			pause = input( "Press enter to continue." )

		# choice == 4
		# build model from data set
		elif choice == 4:

			# try to unpickle data set and train classifier
			try:

				# get user input
				dataset_filename, level, size, n, d = cli_choose_dataset()

				# check for 0 datasets
				if level < 0:
					print( "NO DATASETS AVAILABLE. BUILD ONE TO CONTINUE." )
					continue

				# get user input
				print( "Using 3-layer neural network" )
				epochs = int(input("Enter number of epochs: "))
				layer1 = int(input("Enter number of nodes for Layer 1: "))
				layer2 = int(input("Enter number of nodes for Layer 2: "))
				layer3 = int(input("Enter number of nodes for Layer 3: "))

				# create model filename
				model_filename = dataset_filename.split("/")[1] + "-" + str(layer1) + "-" + str(layer2) + "-" + str(layer3)

				# unpickle the data and tags lists
				tags = pickle.load( open( dataset_filename+"_tags", "rb" ) )
				data = pickle.load( open( dataset_filename+"_data", "rb" ) )

				# print output
				print("tags initial size:", len(tags))
				print("data initial size:", len(data))

				# split data into training/testing
				size = int( len(data)*(0.75) )
				train_data = np.array( data[1:size] )
				train_tags = np.array( tags[1:size] )
				test_data = np.array( data[size:] )
				test_tags = np.array( tags[size:] )

				# print output
				print("tags training size:", len(train_tags))
				print("data training size:", len(train_data))
				print("tags testing size:", len(test_tags))
				print("data testing size:", len(test_data))

				# train model
				model = keras.Sequential()
				model.add( keras.layers.Dense( layer1, input_dim=len(data[0]) ) )
				model.add( keras.layers.Dense( layer2, input_dim=26 ) )
				model.add( keras.layers.Dense( layer3, input_dim=13 ) )
				model.add( keras.layers.Dense(2, activation=tf.nn.softmax) )
				model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
				model.fit(train_data, train_tags, epochs=epochs)

				# calculate test loss and test acc
				test_loss, test_acc = model.evaluate(test_data, test_tags)

				print('Test accuracy:', test_acc)
				print( "Save model? Y or N" )
				save_choice = input( "\nEnter choice: ")

				if save_choice == "Y" or save_choice == "y":
					# save model
					model.save("./models/"+model_filename)

					# print output
					print( "Model saved" )
					print( "Filename: " + model_filename )
					print( "Filename: level-size-n-d-epochs-layer1-layer2-layer3\n" )

			# print errors
			except Exception as e:
				if errors:
					print( e )
					print(tuna.print_exception())
				pass

			# pause for user input
			pause = input( "Press enter to continue" )

		# choice == 5
		# grab and view random datum
		elif choice == 5:
			level = int(input("\nEnter data level: "))
			n = int(input("Enter number of days to look at before investing: "))
			d = int(input("Enter number of days to have been invested: "))
			random_investment( level, n, d, True )

		# choice == 6
		# build model from data set
		elif choice == 6:
			level = int(input("\nEnter data level: "))
			n = int(input("Enter number of days to look at before investing: "))
			d = int(input("Enter number of days to have been invested: "))
			stock_ticker, data, dates, tag = random_investment( level, n, d, False )
			graph_data_set( stock_ticker, data, dates, level, n, d, tag )

		# choice == 7
		# watch model make predictions
		elif choice == 7:

			# get model choice
			model_filename, level, size, n, d, layer1, layer2, layer3 = cli_choose_model()

			# get how many predictions
			count = int(input( "How many predictions to make? "))

			# check for no models
			if level < 0:
				standyby = input( "NO MODELS TO LOAD. PRESS ENTER TO CONTINUE" )
				continue

			cli_make_predictions( count, model_filename, level, n, d )

		# choice != VALID
		else:
			pause = input("Invalid choice\nPress enter to continue.")
Example No. 11
warnings.filterwarnings('ignore')

inputs = np.random.uniform(size=(10, 3, 30, 30))
params = {
    'kernel_h': 5,
    'kernel_w': 5,
    'pad': 0,
    'stride': 2,
    'in_channel': inputs.shape[1],
    'out_channel': 64,
}
layer = Convolution(params)
out = layer.forward(inputs)

keras_model = keras.Sequential()
keras_layer = layers.Conv2D(filters=params['out_channel'],
                            kernel_size=(params['kernel_h'], params['kernel_w']),
                            strides=(params['stride'], params['stride']),
                            padding='valid',
                            data_format='channels_first',
                            input_shape=inputs.shape[1:])
keras_model.add(keras_layer)
sgd = optimizers.SGD(lr=0.01)
keras_model.compile(loss='mean_squared_error', optimizer=sgd)
weights = np.transpose(layer.weights, (2, 3, 1, 0))
keras_layer.set_weights([weights, layer.bias])
keras_out = keras_model.predict(inputs, batch_size=inputs.shape[0])
print('Relative error (<1e-6 will be fine): ', rel_error(out, keras_out))
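
# The rel_error helper used above is not included in this snippet; a common sketch of
# such a relative-error check (an assumption) is:
def rel_error(x, y):
    # maximum relative difference between two arrays
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))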

# %% [markdown]
Example No. 12
def main():
    fashion_mnist = keras.datasets.fashion_mnist

    (train_images, train_labels), (test_images,
                                   test_labels) = fashion_mnist.load_data()

    print("Data Info:")
    print("train img shape: " + str(train_images.shape))
    print("Len: " + str(len(train_labels)))
    print("Train Labels: " + str(train_labels))
    print("Test img shape: " + str(test_images.shape))
    print("Len: " + str(len(test_labels)))
    print("--------------------")

    #Normalize data
    train_images = train_images / 255.0

    test_images = test_images / 255.0

    #Create model
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(1024, activation=tf.nn.relu),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(512, activation=tf.nn.sigmoid),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(10, activation=tf.nn.softmax)
    ])

    #Compile model
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    workers = mp.cpu_count()
    batch_size = 512

    model.fit_generator(generator=SeqGen(train_images,
                                         train_labels,
                                         batch_size=batch_size),
                        epochs=10,
                        verbose=1,
                        workers=workers)

    #Evaluate model
    test_loss, test_acc = model.evaluate(test_images, test_labels)

    print('Test accuracy:', test_acc)

    #Make predictions
    predictions = model.predict(test_images)
    print("Prediction: " + str(predictions[0]))

    #Plot results
    num_rows = 5
    num_cols = 3
    num_images = num_rows * num_cols
    plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
    for i in range(num_images):
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
        plot_image(i, predictions, test_labels, test_images)
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
        plot_value_array(i, predictions, test_labels)
        plt.show()
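
# A minimal sketch of the SeqGen batch generator passed to fit_generator above (an
# assumption; the original class is not shown). It follows the keras.utils.Sequence
# protocol so it can safely be consumed by multiple workers.
import numpy as np


class SeqGen(keras.utils.Sequence):
    def __init__(self, images, labels, batch_size=512):
        self.images, self.labels, self.batch_size = images, labels, batch_size

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.images) / self.batch_size))

    def __getitem__(self, idx):
        batch = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.images[batch], self.labels[batch]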
Example No. 13
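# n_node and drate below are hyperparameters defined elsewhere in the original script;
# for a self-contained run one might assume, for example:
n_node = 64   # width of the hidden dense layer (assumed value)
drate = 0.5   # dropout rate before the output layer (assumed value)
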
model = keras.Sequential([
    keras.layers.Conv2D(input_shape=(21, 21, 8),
                        filters=16,
                        kernel_size=3,
                        padding='same',
                        activation='selu',
                        strides=(2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(0.5),
    keras.layers.Conv2D(filters=128,
                        kernel_size=5,
                        padding='same',
                        activation='selu',
                        strides=(2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(0.5),
    keras.layers.Conv2D(filters=256,
                        kernel_size=5,
                        padding='same',
                        activation='selu',
                        strides=(2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(0.5),
    #keras.layers.Conv2D(filters=32, kernel_size=3,padding='same',activation='selu',strides=(2,2)),
    #keras.layers.BatchNormalization(),
    #keras.layers.Dropout(0.5),
    #keras.layers.Conv2D(filters=128, kernel_size=3,padding='same',activation='selu',strides=(2,2)),
    #keras.layers.BatchNormalization(),
    keras.layers.Flatten(),
    keras.layers.Dense(units=n_node, activation='sigmoid'),
    keras.layers.Dropout(drate),
    keras.layers.Dense(units=1, activation='linear')
])
Example No. 14
def main(dim=84):
    global Local, LOGGER

    LOGGER.info('starting main for {} dimensions'.format(dim))

    # read data
    train = pd.read_hdf("train.h5", "train")
    test = pd.read_hdf("test.h5", "test")
    index = test.index

    X = train.drop(['y'], axis=1).values
    y = train.pop('y').values
    test = test.values

    X = StandardScaler().fit_transform(X)
    test = StandardScaler().fit_transform(test)

    enc_dim = dim  # enc_dim: dimension to encode to

    # create input layer
    i_layer = Input(shape=(120, ))

    # create intermediate encoding layer
    interm_dim = 480
    interm_enc = Dense(interm_dim, activation='sigmoid')(i_layer)

    # create encoded layer
    # e_layer = Dense(enc_dim, activation = 'relu')(i_layer)
    e_layer = Dense(enc_dim, activation='sigmoid')(interm_enc)

    # create intermediate decoding layer
    interm_dec = Dense(interm_dim, activation='sigmoid')(e_layer)

    # create decoded layer
    d_layer = Dense(120)(interm_dec)

    # create auto-encoder, the model that maps input straight to output
    auto_encoder = Model(i_layer, d_layer)

    # encoder: map input to lower dimension
    encoder = Model(i_layer, e_layer)

    # # create model for decoding
    # enc_input = Input(shape = (enc_dim,))
    # dec_layer = auto_encoder.layers[-1]
    # decoder = Model(enc_input, dec_layer(enc_input))

    # now let's train!
    # NOTE: we encode our entire X!
    auto_encoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    auto_encoder.fit(X, X, epochs=500)

    # and now we can encode our data:
    X = encoder.predict(X)
    test = encoder.predict(test)

    # update user
    print('encoding done!')

    # now we do regular prediction on encoded data, if the local-flag is set to True
    X_train, X_test, y_train, y_test = (train_test_split(
        X, y, test_size=0.33, random_state=42) if Local else
                                        (X, y, test, test))

    # Model 2 from earlier
    model = keras.Sequential([
        keras.layers.Dense(120, activation=tf.nn.relu, input_dim=enc_dim),
        keras.layers.Dense(120, activation=tf.nn.tanh),
        keras.layers.Dropout(0.25),
        keras.layers.Dense(120, activation=tf.nn.relu),
        keras.layers.Dense(120, activation=tf.nn.relu),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(5, activation=tf.nn.softmax)
    ])
    model.name = 'model2'

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    #
    model.fit(X_train, y_train, epochs=1000)

    LOGGER.info('Done, {} now'.format('evaluating' if Local else 'predicting'))

    if Local:
        # if local is set to True: evaluate on local data
        results = model.evaluate(X_test, y_test)
        print("done :D")
        print(results)

        LOGGER.info('evaluation done, results:')
        LOGGER.info(results)
        now = dt.now()
        LOGGER.info(
            'timestamp: ' +
            '{}, {}, {}, {}\n\n'.format(now.month, now.day, now.hour, now.minute))
    else:
        # otherwise predict test-set and print to csv
        y_pred = model.predict_classes(test)
        resf = pd.DataFrame({'Id': index, 'y': y_pred})

        # get the filename:
        # import datetime.datetime as dt
        now = dt.now()
        filename = 'Results_task3_{}_{}_{}_{}.csv'.format(
            now.month, now.day, now.hour, now.minute)
        resf.to_csv(filename, index=False)
        print('Done')
Example No. 15
    def __init__(self):
        self.model = keras.Sequential()
        self.plot_losses = PlotLossesCallback()
        self.save_model_img = 'image/convolutional_neural_network_model_cifar10.png'
        self.save_model_file = 'model/convolutional_neural_network_model_cifar10.h5'
Example No. 16
import keras
from keras.models import load_model
import cv2
import numpy as np
from libs.hdf5datasetgeneratormultilabel import HDF5DatasetGenerator
from libs.hdf5datasetwriter import HDF5DatasetWriter
model = load_model("ResNet50")
model1 = keras.Sequential()
model.pop()
model.pop()
for layer in model.layers:
    model1.add(layer)
model = model1
print(model.summary())
BATCH_SIZE = 32

trainGen = HDF5DatasetGenerator("hdf5/train2.hdf5", BATCH_SIZE, classes=5)
valGen = HDF5DatasetGenerator("hdf5/val.hdf5", BATCH_SIZE, classes=5)
print(trainGen.db["images"].shape[0])
print(valGen.db["images"].shape[0])
train_dataset = HDF5DatasetWriter((trainGen.db["images"].shape[0], 2048),
                                  "hdf5/train_features_ResNet50.hdf5",
                                  dataKey="features",
                                  bufSize=1000)

features = model1.predict(trainGen.db["images"])
#f1=[]
#for feature in features:
#    f1.append(feature.flatten())
#features=np.array(f1)
print(features.shape)
Example No. 17
train_images.shape

train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(784, activation=tf.nn.relu),
    keras.layers.Dense(392, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adadelta',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=20)

test_loss, test_acc = model.evaluate(test_images, test_labels)

print('Test accuracy:', test_acc)

model.save('my_model.h5')
print("Saved model to disk")
def app():
    # Load the dataset
    imdb = keras.datasets.imdb
    (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
    logger.info("Raw train entries: {}, labels: {}".format(len(train_data), len(train_labels)))
    logger.info("Raw test  entries: {}, labels: {}".format(len(test_data), len(test_labels)))
    print()
    # Get the dataset's word-to-index dictionary
    d_word_index = imdb.get_word_index()
    # l_words = ['\t'.join([str(word), str(w_index)]) for word, w_index in d_word_index.items()][:10]
    # some_words = "\n".join(l_words)
    # print(f"{some_words}")

    # Add the special vocabulary tokens to the word index and shift the existing indices
    # print(decode_text(d_word_index, train_data[0]))  # original text (before adjusting the indices the decoded content looks wrong)
    # print()
    d_word_index = {k: (v + 3) for k, v in d_word_index.items()}
    d_word_index["<PAD>"] = 0
    d_word_index["<START>"] = 1
    d_word_index["<UNK>"] = 2  # unknown
    d_word_index["<UNUSED>"] = 3

    # Inspect the actual text data
    # print(train_data[0])  # the text as integer indices
    print(decode_text(d_word_index, train_data[0]))  # the original text
    # print(f"train_data[0] len: {len(train_data[0])}")  # different texts may have different word counts
    # print(f"train_data[1] len: {len(train_data[1])}")

    # Preprocess and encode (pad) the raw text data
    train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=d_word_index["<PAD>"], padding='post',
                                                            maxlen=256)
    test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=d_word_index["<PAD>"], padding='post',
                                                           maxlen=256)
    logger.info("Train shape: {}, Test shape: {}".format(train_data.shape, test_data.shape))
    # Create a validation set
    x_val = train_data[:10000]
    y_val = train_labels[:10000]
    partial_x_train = train_data[10000:]
    partial_y_train = train_labels[10000:]

    # Model design
    # The input shape is the vocabulary size used for the movie reviews (10,000 words)
    vocab_size = 10000
    model = keras.Sequential()
    model.add(keras.layers.Embedding(vocab_size, 16))
    model.add(keras.layers.GlobalAveragePooling1D())
    model.add(keras.layers.Dense(16, activation='relu'))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.summary()  # inspect the model structure

    # Set the training parameters
    optimizer = "adam"
    loss_fnc = "binary_crossentropy"
    metrics = ["accuracy"]
    model.compile(optimizer=optimizer, loss=loss_fnc, metrics=metrics)

    # Train the model
    history = model.fit(partial_x_train,
                        partial_y_train,
                        epochs=40,
                        batch_size=512,
                        validation_data=(x_val, y_val),
                        verbose=1)
    # Visualize the training history
    show_image(history)

    # Evaluate the model
    results = model.evaluate(test_data, test_labels, verbose=2)
    print(results)

    pass
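
# A minimal sketch of the decode_text helper used above (an assumption; the original
# implementation is not shown): invert the word index and map ids back to words.
def decode_text(word_index, encoded_review):
    reverse_index = {idx: word for word, idx in word_index.items()}
    return " ".join(reverse_index.get(i, "?") for i in encoded_review)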
Example No. 19
X = df2[df2.columns[:-1]]
X.shape

#Scaling the data
transformer = RobustScaler().fit(X)
X = transformer.transform(X)
X
###validation split
X, valX, Y, valY = train_test_split(X, Y, test_size=0.2, random_state=0)
X.shape

model = keras.Sequential([
    keras.layers.Dense(1000, activation='relu', input_dim=32),
    keras.layers.Dense(800, activation='relu'),
    keras.layers.Dense(640, activation='relu'),
    keras.layers.Dense(580, activation='relu'),
    keras.layers.Dense(330, activation='relu'),
    keras.layers.Dense(250, activation='relu'),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(42, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

plot_model(model,
           to_file='model_plot.png',
           show_shapes=True,
           show_layer_names=True)

with tf.device('/device:GPU:0'):
Example No. 20
resnet = keras.applications.resnet50.ResNet50(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None)
from keras.layers import Dropout, Flatten, Dense

#########################################################
# Locking the first 51 layers  # Non-trainable
#########################################################
for layer in resnet.layers[:51]:
    layer.trainable = False

#########################################################
# Creating Model
#########################################################
top_model = keras.Sequential()
top_model.add(resnet)
top_model.add(Dense(512, activation="relu"))
top_model.add(Dense(256, activation="relu"))
top_model.add(Dense(64, activation="relu"))
top_model.add(Dense(1, activation="tanh"))

#########################################################
# Load weights from earlier trained models if you want
#########################################################
#checkpoint_folder="gdrive/My Drive/Self-Driving-Car-Data/CheckPointModel/"
#top_model.load_weights(checkpoint_folder+"saved-model-{epoch:02d}-{val_acc:.2f}.hdf5")

#########################################################
# Defining Root Mean Squared Error Function
#########################################################
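# (The snippet is cut off here; a typical Keras-backend RMSE metric, given as an
#  assumption of what would follow, might look like this.)
from keras import backend as K


def rmse(y_true, y_pred):
    # root mean squared error between predictions and targets
    return K.sqrt(K.mean(K.square(y_pred - y_true)))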
Example No. 21
    def call(self, x):
        return keras.layers.Add()([x, keras.Sequential(self.layers)(x)])
Example No. 22
import keras
import numpy as np
import tensorflow as tf
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')

xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)

#model.fit(xs, ys, epochs=500)
#print(model.predict([10.0]))


#Main model function
class Model(object):
    def __init__(self):
        self.w = tf.Variable(10.0)
        self.b = tf.Variable(10.0)

    def __call__(self, x):
        return self.w * x + self.b


model = Model()
xs = [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]
ys = [-3.0, -1.0, 1.0, 3.0, 5.0, 7.0]
print(model(xs))


#Loss function
def loss(predicted_Y, target_Y):
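    # (the original snippet is truncated here; a standard squared-error loss body,
    #  given as an assumption, would be)
    return tf.reduce_mean(tf.square(predicted_Y - target_Y))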
Example No. 23
df.groupby('station_id')['air_data_value'].sum()

df['air_data_value'].describe()

df = df.drop(columns = 'station_id')
df = df.drop(columns = 'stime')
df = df.drop(columns = 'UGRD')
df = df.drop(columns = 'VGRD')
df = df.drop(columns = 'air_data_value')

X = df.drop(columns = ('PM_buckets'))
y = df['PM_buckets']

X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state = 1)

model = keras.Sequential([
    keras.Input((5,)),
    layers.Dense(128,activation = 'relu'),
    layers.Dense(32,activation = 'relu'),
    layers.Dense(1)
])

model.summary()

model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])

model.evaluate(X_test,y_test)

y_pred = model.predict(X_test)
    print(":".join([str(c), str(x)]))
    
result = reply_df.query('Label == '+ str(compatible_class))
print(result['Words'])

"""# Building the model (Keras)"""

import keras
import tensorflow as tf
import numpy as np

#-- Build the model --#
ips = classify_dictionary.shape[1]  # number of words in the dictionary, used as the input shape
l = np.unique(classify_label)  # get the distinct labels
model_keras = keras.Sequential()
# Hidden layers (input_shape is the dictionary length; the output size is the number of labels used)
model_keras.add(keras.layers.Dense(32, input_shape=(ips,)))
model_keras.add(keras.layers.Dense(16, activation=tf.nn.relu))
model_keras.add(keras.layers.Dense(l.shape[0], activation=tf.nn.sigmoid))
model_keras.summary()


#-- Compile the model --#
model_keras.compile(optimizer=tf.train.AdamOptimizer(), 
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
#-- Training --#
history = model_keras.fit(classify_dictionary,
                    classify_label,
                    epochs=300,
Example No. 25
y_train = None
x_dev = None
y_dev = None
x_test = None
y_test = None

dataset_predict = np.loadtxt("test.csv", delimiter=",", skiprows=1)
x_predict = None

# hyperparameters
input_dim = None  # TODO
epochs = 5
batch_size = 32

# initialize
model = K.Sequential()

# architecture
model.add(K.layers.Dense(units=1, activation='sigmoid', input_dim=input_dim))

# loss
model.compile(loss=K.losses.categorical_crossentropy,
              optimizer=K.optimizers.Adam())

# train: x_train and y_train are Numpy arrays
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)  # TODO

# evaluate: dev set
loss_and_metrics_dev = model.evaluate(x_dev, y_dev, batch_size=batch_size)

# evaluate: test set
Example No. 26
        netDetailsItem = []
        layers = []

        if layer2 == 0:

            layers.append(
                keras.layers.Dense(layer1, input_dim=2, activation="tanh"))
            layers.append(keras.layers.Dense(1, activation="linear"))
        else:

            layers.append(
                keras.layers.Dense(layer1, input_dim=2, activation="tanh"))
            layers.append(keras.layers.Dense(layer2, activation="tanh"))
            layers.append(keras.layers.Dense(1, activation="linear"))

        modell = keras.Sequential(layers)
        print(modell.summary())

        model = NetSlice.NetSlice(modell, 'keras', data)

        routineSettings = {"CompileAll": True, "SaveAll": None}

        trainRoutine = [{
            "Compile": [
                keras.optimizers.Adam(lr=0.1), 'mean_squared_error',
                ['binary_accuracy', 'categorical_accuracy']
            ],
            "Train": [100, None, 0]
        }, {
            "Compile": [
                keras.optimizers.Adam(lr=0.01), 'mean_squared_error',
Example No. 27
    def on_train_begin(self, logs={}):
        self.losses = []
        self.accuracys = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
        self.accuracys.append(logs.get('accuracy'))


(train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()
train_x = train_x.reshape(-1, 28 * 28)
test_x = test_x.reshape(-1, 28 * 28)

model = keras.Sequential([
    keras.layers.Dense(28 * 28, input_shape=(28 * 28, ), activation='relu'),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(32, activation='softmax')
])

model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
history = LossHistory()
model.fit(train_x, train_y, epochs=5, batch_size=500, callbacks=[history])

test_loss, test_acc = model.evaluate(
    test_x,
    test_y,
    verbose=2,
Example No. 28
def hyperas_build_hani(same_train_paths, diff_train_paths, same_val_paths,
                       diff_val_paths, same_test_paths, diff_test_paths):
    """
    A template function for Hyperas to execute tests 
    """
    import keras
    K = keras.backend
    KL = keras.layers

    # generators
    training_generator = LFWDataLoader(same_train_paths,
                                       diff_train_paths,
                                       shuffle=True)
    validation_generator = LFWDataLoader(same_val_paths, diff_val_paths)

    input_shape = (250, 250, 1)

    from datetime import datetime
    from os.path import isdir, exists
    from src.lfw_dataset import _extract_samples_paths, train_info_url, test_info_url
    import numpy as np
    first_input = KL.Input(input_shape)
    second_input = KL.Input(input_shape)

    def tanh_scaled(x):
        A = 1.7159
        B = 2 / 3
        return A * K.tanh(B * x)

    model_params = {}

    act = model_params.get('act', tanh_scaled)
    dropout = model_params.get('dropout', 0)
    batchnorm = model_params.get('batchnorm', False)
    #loss = model_params.get('loss', contrastive_loss)
    learning_rate = model_params.get('learning_rate', 1e-3)
    first_input = KL.Input(input_shape)
    second_input = KL.Input(input_shape)

    batchnorm = True
    dropout = True
    act = 'relu'

    model = keras.Sequential()

    initialize_weights_conv = keras.initializers.glorot_uniform(
        seed=84)  # filters initialize
    initialize_weights_dense = keras.initializers.glorot_uniform(
        seed=84)  # dense initialize
    initialize_bias = keras.initializers.RandomNormal(
        mean=0.5, stddev=0.01, seed=84)  # bias initialize

    model.add(
        KL.Conv2D({{choice([5, 10, 14, 30, 60])}}, (6, 6),
                  strides=(2, 2),
                  activation=act,
                  input_shape=input_shape,
                  kernel_initializer=initialize_weights_conv,
                  kernel_regularizer=l2({{uniform(0, 0.1)}})))

    model.add(KL.BatchNormalization())
    model.add(KL.Dropout({{uniform(0, 0.5)}}))
    model.add(KL.MaxPool2D())

    model.add(
        KL.Conv2D({{choice([5, 10, 14, 30, 60])}}, (6, 6),
                  strides=(2, 2),
                  activation=act,
                  kernel_initializer=initialize_weights_conv,
                  bias_initializer=initialize_bias,
                  kernel_regularizer=l2({{uniform(0, 0.1)}})))
    model.add(KL.BatchNormalization())
    model.add(KL.Dropout({{uniform(0, 0.5)}}))
    model.add(KL.MaxPool2D())

    model.add(
        KL.Conv2D({{choice([5, 10, 14, 30, 60])}}, (6, 6),
                  activation=act,
                  kernel_initializer=initialize_weights_conv,
                  bias_initializer=initialize_bias,
                  kernel_regularizer=l2({{uniform(0, 0.1)}})))

    model.add(KL.BatchNormalization())
    model.add(KL.Dropout({{uniform(0, 0.5)}}))
    model.add(KL.MaxPool2D())

    model.add(KL.Flatten())

    model.add(
        KL.Dense({{choice([40, 64, 128, 256])}},
                 activation=act,
                 kernel_regularizer=l2({{uniform(0, 0.1)}}),
                 kernel_initializer=initialize_weights_dense,
                 bias_initializer=initialize_bias))
    model.add(KL.Dropout({{uniform(0, 0.5)}}))
    model.add(
        KL.Dense({{choice([40, 64, 128, 256])}},
                 activation=None,
                 kernel_regularizer=l2({{uniform(0, 0.1)}}),
                 kernel_initializer=initialize_weights_dense,
                 bias_initializer=initialize_bias))
    model.add(KL.Dropout({{uniform(0, 0.5)}}))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(first_input)
    encoded_r = model(second_input)

    # calculate similarity
    L1_distance = KL.Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))(
        [encoded_l, encoded_r])
    similarity = KL.Dense(1,
                          activation='sigmoid',
                          kernel_initializer=initialize_weights_dense,
                          bias_initializer=initialize_bias)(L1_distance)
    final_network = keras.Model(inputs=[first_input, second_input],
                                outputs=similarity)
    optimizer = keras.optimizers.SGD(lr={{uniform(0.0001, 0.1)}},
                                     momentum={{uniform(0, 1)}},
                                     decay={{uniform(0, 0.1)}})
    final_network.compile(loss='binary_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy'])

    start_time = datetime.now()
    history = final_network.fit_generator(generator=training_generator,
                                          validation_data=validation_generator,
                                          use_multiprocessing=False,
                                          verbose=2,
                                          epochs=30)
    end_time = datetime.now()

    validation_acc = np.amax(history.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)

    test_generator = LFWDataLoader(same_test_paths, diff_test_paths)
    test_loss, test_accuracy = final_network.evaluate_generator(test_generator)

    return {
        'loss': -validation_acc,
        'status': STATUS_OK,
        'history': history.history,
        'training_time': end_time - start_time,
        'test_loss': test_loss,
        'test_accuracy': test_accuracy
    }
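
# A minimal sketch (not from the original source) of how a Hyperas template like the
# one above is typically run: optim.minimize re-parses the function, substitutes the
# {{...}} expressions, and searches the space with hyperopt. load_lfw_paths is a
# hypothetical helper that must return the six path lists in the same order as the
# template's parameters.
from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=hyperas_build_hani,
                                      data=load_lfw_paths,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best hyperparameters:', best_run)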
Example #29
# Imports assumed by this example (it is an excerpt from a larger benchmark script;
# cross_val_data and fte_bte_experiment are helpers defined elsewhere in that script):
import numpy as np
import keras
from keras import layers
from keras.backend import clear_session
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from proglearn.progressive_learner import ProgressiveLearner
from proglearn.transformers import NeuralClassificationTransformer
from proglearn.voters import KNNClassificationVoter
from proglearn.deciders import SimpleArgmaxAverage
from proglearn.forest import LifelongClassificationForest


def run_fte_bte_exp(data_x, data_y, which_task, model, ntrees=30, shift=0):

    df_total = []

    # Rotate the batch of training samples that are used from each class in each task.
    for slot in range(1):
        train_x, train_y, test_x, test_y = cross_val_data(
            data_x, data_y, shift, slot)

        if model == "odif":
            # Reshape the data
            train_x = train_x.reshape(
                train_x.shape[0],
                train_x.shape[1] * train_x.shape[2] * train_x.shape[3])
            test_x = test_x.reshape(
                test_x.shape[0],
                test_x.shape[1] * test_x.shape[2] * test_x.shape[3])

        if model == "odin":
            clear_session()  # clear GPU memory before each run, to avoid OOM errors

            default_transformer_class = NeuralClassificationTransformer

            network = keras.Sequential()
            network.add(
                layers.Conv2D(
                    filters=16,
                    kernel_size=(3, 3),
                    activation="relu",
                    input_shape=np.shape(data_x)[1:],
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=32,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=64,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=128,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=254,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))

            network.add(layers.Flatten())
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(units=10, activation="softmax"))

            default_transformer_kwargs = {
                "network": network,
                "euclidean_layer_idx": -2,
                "loss": "categorical_crossentropy",
                "optimizer": Adam(3e-4),
                "fit_kwargs": {
                    "epochs": 100,
                    "callbacks":
                    [EarlyStopping(patience=5, monitor="val_loss")],
                    "verbose": False,
                    "validation_split": 0.33,
                    "batch_size": 32,
                },
            }
            default_voter_class = KNNClassificationVoter
            default_voter_kwargs = {"k": int(np.log2(300))}
            default_decider_class = SimpleArgmaxAverage

            p_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class,
            )

        elif model == "odif":
            p_learner = LifelongClassificationForest()

        df = fte_bte_experiment(
            train_x,
            train_y,
            test_x,
            test_y,
            ntrees,
            shift,
            slot,
            model,
            p_learner,
            which_task,
            acorn=12345,
        )

        df_total.append(df)

    return df_total
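
# A hypothetical usage sketch (not from the original source), assuming a dataset such
# as CIFAR-10 loaded via keras and the decision-forest "odif" variant; the helpers
# cross_val_data and fte_bte_experiment must be defined for this call to run.
from keras.datasets import cifar10

(x_tr, y_tr), (x_te, y_te) = cifar10.load_data()
data_x = np.concatenate([x_tr, x_te])
data_y = np.concatenate([y_tr, y_te]).ravel()

results = run_fte_bte_exp(data_x, data_y, which_task=1, model="odif")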
Example #30
import tensorflow as tf
import keras
from keras.datasets import fashion_mnist

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()


x_train = x_train/255.0
x_test = x_test/255.0


model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

# Save the trained model to an HDF5 file, then reload its weights from that file.
model.save('fashion_mnist_model.h5')

model.load_weights('fashion_mnist_model.h5')

test_loss, test_acc = model.evaluate(x_test, y_test)
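
# A short follow-up sketch (not in the original example): report the evaluation result
# and restore the complete saved model (architecture plus weights) with load_model.
print('Test accuracy:', test_acc)

restored_model = keras.models.load_model('fashion_mnist_model.h5')
restored_model.summary()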