def get_encoded_vector(self,test):
    """Encode *test* with the autoencoder restored from the latest checkpoint.

    Restores the most recent checkpoint from ``<working_dir>/model/`` and
    runs the encoder over *test* in mini-batches of ``self.batch_size``.

    Args:
        test: array-like of input rows (indexable, with ``.shape[0]``).

    Returns:
        dict with keys:
            'vectors'     -- np.ndarray of encoded rows (batches concatenated),
            'loss'        -- sum (not mean) of per-batch cross-entropy losses,
            'similiarity' -- list of per-row cosine similarities
                             (key spelling kept for backward compatibility);
        or ``None`` when *test* is empty (preserved original behavior).
    """
    saver = tf.train.Saver()
    saver.restore(self.session, tf.train.latest_checkpoint(self.working_dir + '/model/'))
    print('Restored model', ops.look_for_last_checkpoint(self.working_dir + "/model/"))
    if test.shape[0] == 0:
        return None
    sim = []
    encoded_data = []
    loss = 0
    # Fixed-size batches; the final slice may be shorter (slicing clamps).
    for start in range(0, len(test), self.batch_size):
        batch_x = test[start:start + self.batch_size]
        batch_encoded_data, batch_loss, batch_sim = self.session.run(
            [self.encoder_op, self.cross_entropy, self.cosine_similarity],
            feed_dict={self.x: batch_x, self.is_train: False})
        sim += list(batch_sim)
        loss += batch_loss  # NOTE: accumulated sum, not averaged over batches
        encoded_data.append(batch_encoded_data)
    encoded_data = np.concatenate(encoded_data, axis=0)
    return {'vectors': encoded_data, 'loss': loss, 'similiarity': sim}
 def predict(self,test):
     """Predict outputs for ``test['x']`` using the restored model.

     Restores the most recent checkpoint from ``<working_dir>/model/`` and
     runs ``self.prediction`` over the input in mini-batches.  When labels
     are present under ``'y'`` they are added to the feed dict; the return
     value is the same either way (the original code had two identical
     return branches, collapsed here).

     Args:
         test: dict with key ``'x'`` (array-like inputs) and optional ``'y'``.

     Returns:
         np.ndarray of predictions, or ``None`` when ``'x'`` is absent or
         empty (preserved original behavior).
     """
     saver = tf.train.Saver()
     saver.restore(self.session, tf.train.latest_checkpoint(self.working_dir + '/model/'))
     print('Restored model', ops.look_for_last_checkpoint(self.working_dir + "/model/"))
     if 'x' not in test or test['x'].shape[0] == 0:
         return None
     has_labels = 'y' in test and test['y'].shape[0] > 0
     test_prediction = []
     for start in range(0, len(test['x']), self.batch_size):
         end = start + self.batch_size
         feed = {self.x: test['x'][start:end], self.is_train: False}
         if has_labels:
             feed[self.y] = test['y'][start:end]
         pred = self.session.run([self.prediction], feed_dict=feed)
         # session.run with a list of fetches returns a list; unwrap it.
         if isinstance(pred, list):
             test_prediction += pred[0].tolist()
         else:
             test_prediction += pred.tolist()
     return np.array(test_prediction)
# NOTE(review): removed web-scrape artifact lines ("示例#3" / "0") — they were
# not Python and made the file unparseable.
    def train(self, train, val_data=None, max_keep=100, shuffle=False):
        """Train the model, optionally validating and checkpointing each epoch.

        Args:
            train: dict with ``'x'`` (inputs) and ``'y'`` (targets).
            val_data: optional dict of the same shape, used for per-epoch
                validation when provided.
            max_keep: maximum number of checkpoints the Saver retains.
            shuffle: when True, reshuffle ``train`` in place every epoch
                (mutates the caller's dict — preserved original behavior).

        Side effects:
            Appends ``[epoch, mean_loss, mean_acc]`` to ``self.train_result``
            (and ``self.test_result`` for validation); when
            ``self.working_dir`` is set, writes TensorBoard summaries and
            saves a checkpoint after every epoch.
        """
        self.session.run(tf.global_variables_initializer())
        epoch_offset = 0

        saver = tf.train.Saver(max_to_keep=max_keep)
        if self.model_restore and self.working_dir is not None:
            name = ops.look_for_last_checkpoint(self.working_dir + "/model/")
            if name is not None:
                saver.restore(self.session,
                              self.working_dir + "/model/" + name)
                print('Model Successfully Loaded : ', name)
                # Checkpoint names look like 'model-<epoch>'; parse the epoch
                # from the suffix (more robust than the fixed offset name[6:]).
                epoch_offset = int(name.split('-')[-1])

        if self.working_dir is not None:
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter(self.working_dir + '/train',
                                                 self.session.graph)
            test_writer = tf.summary.FileWriter(self.working_dir + '/test')

        for epoch in range(epoch_offset + 1, epoch_offset + self.epochs + 1):
            if shuffle:
                ind_list = list(range(len(train['x'])))
                random.shuffle(ind_list)
                train['x'] = train['x'][ind_list]
                train['y'] = train['y'][ind_list]
            epoch_loss = 0
            acc = 0
            batch_iteration = 0
            summary = None  # guard: stays None if the training set is empty
            for start in range(0, len(train['x']), self.batch_size):
                end = start + self.batch_size
                feed = {
                    self.x: train['x'][start:end],
                    self.y: train['y'][start:end],
                    self.lr: self.learning_rate,
                    self.is_train: True,
                }
                if self.working_dir is not None:
                    summary, _, loss, batch_acc = self.session.run(
                        [merged, self.optimizer, self.cross_entropy,
                         self.accuracy],
                        feed_dict=feed)
                else:
                    _, loss, batch_acc = self.session.run(
                        [self.optimizer, self.cross_entropy, self.accuracy],
                        feed_dict=feed)
                epoch_loss += loss
                acc += batch_acc
                batch_iteration += 1
                print('Training: Accuracy={} loss={}\r '.format(
                    round(batch_acc, 4),
                    round(epoch_loss / batch_iteration, 4)))
            if self.working_dir is not None and summary is not None:
                train_writer.add_summary(summary, epoch)
            self.train_result.append(
                [epoch, epoch_loss / batch_iteration, acc / batch_iteration])

            if val_data is not None:
                epoch_loss = 0
                acc = 0
                batch_iteration = 0
                summary = None
                for start in range(0, len(val_data['x']), self.batch_size):
                    end = start + self.batch_size
                    feed = {
                        self.x: val_data['x'][start:end],
                        self.y: val_data['y'][start:end],
                        self.lr: self.learning_rate,
                        self.is_train: False,
                    }
                    if self.working_dir is not None:
                        summary, loss, batch_acc = self.session.run(
                            [merged, self.cross_entropy, self.accuracy],
                            feed_dict=feed)
                    else:
                        loss, batch_acc = self.session.run(
                            [self.cross_entropy, self.accuracy],
                            feed_dict=feed)
                    epoch_loss += loss
                    acc += batch_acc
                    batch_iteration += 1
                    print('Validation: Accuracy={} loss={}\r '.format(
                        round(batch_acc, 4),
                        round(epoch_loss / batch_iteration, 4)))
                if self.working_dir is not None and summary is not None:
                    test_writer.add_summary(summary, epoch)
                self.test_result.append([
                    epoch, epoch_loss / batch_iteration, acc / batch_iteration
                ])

                print("Training:", self.train_result[-1],
                      "Val:", self.test_result[-1])
            else:
                print("Training :", self.train_result[-1])

            if self.working_dir is not None:
                saver.save(self.session,
                           self.working_dir + "/model/" + 'model',
                           global_step=epoch)
        print('Training Successfully Complete')