def main(arguments):
    """Entry point: train or evaluate the LSTM model.

    arguments.operation selects the mode:
      * 'train' — load + normalize the train set, split 70/30 into
        train/validation, reshape to 3-D for the LSTM, build and train.
      * 'test'  — load + normalize the test set and run prediction with a
        saved model.
    """
    if arguments.operation == 'train':
        # fix random seed for reproducibility
        seed = 7
        print(seed)
        np.random.seed(seed)

        # get the train data
        # features: train_data[0], labels: train_data[1]
        train_features, train_labels = data.load_data(dataset=arguments.train_dataset)

        # numerizing/normalizing on scale [0,1] the train dataset/labels
        # returns numpy arrays
        train_features, train_labels = data.normalize(train_features, train_labels)

        # split into 70% for train and 30% for validation
        train_features, validation_features, train_labels, validation_labels = train_test_split(
            train_features, train_labels, test_size=0.30, random_state=seed)

        # reshaping to 3d (samples, timesteps=1, features) so the data fit the LSTM model
        # if you are using an embedding layer as first layer you can comment out the next two lines
        train_features = np.reshape(train_features, (train_features.shape[0], 1, train_features.shape[1]))
        validation_features = np.reshape(validation_features,
                                         (validation_features.shape[0], 1, validation_features.shape[1]))
        print("Printing Training Features Shape:")
        print(train_features.shape)
        print("Labels")
        print(train_labels.shape)
        print("Printing Validation Features Shape:")
        print(validation_features.shape)
        print("Labels")
        print(validation_labels.shape)

        # build ONE configured instance and reuse it for both model creation and
        # training (the original created two separate throwaway instances)
        lstm = lstm_class(alpha=LEARNING_RATE, batch_size=BATCH_SIZE, cell_size=CELL_SIZE,
                          dropout=DROPOUT, sequence_length=SEQUENCE_LENGTH)
        # create model
        model = lstm.create_model()
        # train model
        lstm.train(checkpoint_path=arguments.checkpoint_path, batch_size=BATCH_SIZE,
                   model=model, model_path=arguments.save_model, epochs=HM_EPOCHS,
                   X_train=train_features, y_train=train_labels,
                   X_val=validation_features, y_val=validation_labels,
                   result_path=arguments.result_path)

    elif arguments.operation == 'test':
        # get the test data
        # features: test_features[0], labels: test_labels[1]
        print("Loading Test Data...")
        test_features, test_labels = data.load_data(dataset=arguments.test_dataset)
        # numerizing/normalizing on scale [0,1] the test dataset/labels
        # returns numpy arrays
        print("Normalizing Data...")
        test_features, test_labels = data.normalize(test_features, test_labels)

        # reshaping to 3d so the data match the trained shape of our model
        # if you trained a model starting with an embedding layer you can comment out the next line
        # test_features = np.reshape(test_features, (test_features.shape[0], 1, test_features.shape[1]))
        lstm_class.predict(batch_size=BATCH_SIZE_TESTING, X_test=test_features,
                           y_test=test_labels, model_path=arguments.load_model,
                           result_path=arguments.result_path)
Esempio n. 2
0
    def train_srcnn(self, iteration):
        """Train the SRCNN model for `iteration` epochs.

        images = low resolution inputs, labels = high resolution ground truth.
        Reads training files from ./dataset/training/{X2,X3,X4,gray}/, minimizes
        MSE with Adam (lr=1e-4), and checkpoints to self.save_path after every
        epoch.
        """
        sess = self.sess
        # load data: the gray/ directory holds the high-resolution labels
        train_label_list = sorted(glob.glob('./dataset/training/gray/*.*'))

        num_image = len(train_label_list)

        sr_model = SRCNN(channel_length=self.c_length, image=self.x)
        v1, v2, prediction = sr_model.build_model()

        with tf.name_scope("mse_loss"):
            loss = tf.reduce_mean(tf.square(self.y - prediction))
        train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

        batch_size = 3
        # NOTE(review): floor division drops the final partial batch — the
        # sibling train_vdsr uses ceiling division instead; confirm intended.
        num_batch = int(num_image / batch_size)

        init = tf.global_variables_initializer()
        sess.run(init)

        saver = tf.train.Saver(max_to_keep=1)
        if self.pre_trained:
            saver.restore(sess, self.save_path)

        # hoisted: the per-scale file lists are invariant, so glob them once
        # instead of re-globbing inside every batch of every epoch
        train_image_lists = {
            k: sorted(glob.glob('./dataset/training/X{}/*.*'.format(k)))
            for k in range(2, 5)
        }

        for i in range(iteration):
            total_mse_loss = 0
            for j in range(num_batch):
                for k in range(2, 5):  # one pass per downscale factor X2..X4
                    batch_image, batch_label = preprocess.load_data(
                        train_image_lists[k], train_label_list, j * batch_size,
                        min((j + 1) * batch_size, num_image), self.patch_size,
                        self.num_patch_per_image)
                    mse_loss, _ = sess.run([loss, train_op],
                                           feed_dict={
                                               self.x: batch_image,
                                               self.y: batch_label
                                           })
                    # running mean over num_batch batches * 3 scales
                    total_mse_loss += mse_loss / (num_batch * 3)

            print('In', i + 1, 'epoch, current loss is',
                  '{:.5f}'.format(total_mse_loss))
            saver.save(sess, save_path=self.save_path)

        print('Train completed')
Esempio n. 3
0
        # Persist the clustering result: one row per sample, the feature
        # columns followed by the predicted label (self.y) as the last column.
        res_path = os.path.normpath(os.path.join(sys.path[0], args.output_dir, 'Kmeans_%d.txt' %self.n_clusters))
        write_data = np.c_[X, self.y]
        # print(write_data[:1])
        # fmt='%d' truncates to integers — assumes features are integral; TODO confirm
        np.savetxt(res_path, write_data, fmt='%d',delimiter=' ')
        print('Prediction results saved in %s' % res_path)

        # presumably counts how many times this method has run, used to give
        # repeated test runs distinct plot filenames — verify against caller
        self.num += 1
        # print(self.num)
        # Pick the plot filename based on the init scheme; the test_* methods
        # get a per-run suffix (_%d) so repeated runs don't overwrite each other.
        if self.init == 'random':
            if args.method == 'test_kmeans':
                vis_path = os.path.normpath(os.path.join(sys.path[0], args.output_dir, 'Kmeans_%d_%d.png' %(self.n_clusters, self.num)))
            else:
                vis_path = os.path.normpath(os.path.join(sys.path[0], args.output_dir, 'Kmeans_%d.png' %(self.n_clusters)))
            visualize(raw_data, X, self.y, self.n_clusters, 'kmeans', vis_path, self.reduction, self.random_state, **kargs)
        elif self.init == 'kmeans++':
            if args.method == 'test_kmeans++':
                vis_path = os.path.normpath(os.path.join(sys.path[0], args.output_dir, 'Kmeans++_%d_%d.png' %(self.n_clusters, self.num)))
            else:
                vis_path = os.path.normpath(os.path.join(sys.path[0], args.output_dir, 'Kmeans++_%d.png' %(self.n_clusters)))
            visualize(raw_data, X, self.y, self.n_clusters, 'kmeans++', vis_path, self.reduction, self.random_state, **kargs)
        


if __name__ == "__main__":
    # Script entry point: load the clustering dataset from the configured
    # data directory and run KMeans on it with the parsed CLI arguments.
    dataset = load_data(os.path.join(sys.path[0], args.data_dir, 'cluster_data.txt'))
    clusterer = KMeans(args)
    clusterer.run(dataset)
Esempio n. 4
0
    def train_vdsr(self, iteration):
        """Train the VDSR model for `iteration` epochs.

        images = low resolution inputs, labels = high resolution ground truth.
        Reads training files from ./dataset/training/{X2,X3,X4,gray}/,
        minimizes MSE + 1e-4 * L2 weight penalty with Adam under a decaying
        learning rate (x0.9 every 20 epochs, starting at 1e-3), and
        checkpoints to self.save_path after every epoch.
        """
        sess = self.sess
        # load data: one input list per downscale factor, shared label list;
        # iterating this list replaces the original if k==0/1/2 chain (which
        # duplicated the same load_data call three times)
        train_image_lists = [
            sorted(glob.glob('./dataset/training/X{}/*.*'.format(scale)))
            for scale in (2, 3, 4)
        ]
        train_label_list = sorted(glob.glob('./dataset/training/gray/*.*'))

        num_image = len(train_label_list)

        sr_model = VDSR(channel_length=self.c_length, image=self.x)
        prediction, _, l2_loss = sr_model.build_model()

        # fed per-step so the rate can decay during training
        learning_rate = tf.placeholder(dtype='float32', name='learning_rate')

        with tf.name_scope("mse_loss"):
            loss = tf.reduce_mean(tf.square(self.y - prediction))
            loss += 1e-4 * l2_loss  # L2 weight-decay term

        # Adam handles gradient scaling itself, so no explicit clipping needed
        train_op = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(loss)

        batch_size = 3
        # ceiling division: the final partial batch is included
        num_batch = int((num_image - 1) / batch_size) + 1
        print(num_batch)

        init = tf.global_variables_initializer()
        sess.run(init)

        saver = tf.train.Saver(max_to_keep=2)
        if self.pre_trained:
            saver.restore(sess, self.save_path)

        lr = 1e-3

        for i in range(iteration):
            total_loss = 0  # mse + l2
            total_l2 = 0
            if i % 20 == 19:
                lr = lr * 0.9  # decay every 20 epochs
            for j in range(num_batch):
                for image_list in train_image_lists:  # X2, X3, X4 in turn
                    batch_image, batch_label = preprocess.load_data(
                        image_list, train_label_list, j * batch_size,
                        min((j + 1) * batch_size, num_image),
                        self.patch_size, self.num_patch_per_image)

                    l2, losses, _ = sess.run(
                        [l2_loss, loss, train_op],
                        feed_dict={
                            self.x: batch_image,
                            self.y: batch_label,
                            learning_rate: lr
                        })
                    # running means over num_batch batches * 3 scales
                    total_loss += losses / (num_batch * 3)
                    total_l2 += 1e-4 * l2 / (num_batch * 3)

            # report MSE (total - l2 part) and the l2 part separately
            print('In', '%04d' % (i + 1), 'epoch, current loss is',
                  '{:.5f}'.format(total_loss - total_l2),
                  '{:.5f}'.format(total_l2))
            saver.save(sess, save_path=self.save_path)

        print('Train completed')