Example #1
def test():
    input_data = tf.placeholder(tf.int32, [batchSize, maxSeqLength])
    labels = tf.placeholder(tf.int32, [batchSize])

    data = tf.nn.embedding_lookup(wordVectors, input_data)
    data = tf.reshape(data, [batchSize, maxSeqLength, numDimensions, 1])

    train_logits = CNN_model.inference1(data, batchSize, N_CLASSES)
    train_loss = CNN_model.losses(train_logits, labels)
    train_acc = CNN_model.evaluation(train_logits, labels)

    all_accuracy = 0
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, './models/pretrained_CNN.ckpt-20000')

        for i in range(2000):
            train_batch, train_label_batch = input_text_data.get_allTest(i)
            tra_loss, tra_acc = sess.run([train_loss, train_acc], {
                input_data: train_batch,
                labels: train_label_batch
            })
            all_accuracy = all_accuracy + tra_acc
        print('All_accuracy:')
        print(all_accuracy / 2000)
Example #2
def training():
    input_data = tf.placeholder(tf.int32, [batchSize, maxSeqLength])
    labels = tf.placeholder(tf.int32, [batchSize])

    data = tf.nn.embedding_lookup(wordVectors, input_data)
    data = tf.reshape(data, [batchSize, maxSeqLength, numDimensions, 1])

    train_logits = CNN_model.inference1(data, batchSize, N_CLASSES)
    train_loss = CNN_model.losses(train_logits, labels)
    train_op = CNN_model.trainning(train_loss, learning_rate)
    train_acc = CNN_model.evaluation(train_logits, labels)

    tf.summary.scalar('Loss', train_loss)
    tf.summary.scalar('Accuracy', train_acc)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(logs_train_dir, tf.get_default_graph())

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        for i in range(MAX_STEP):
            train_batch, train_label_batch = input_text_data.getTrainBatch()
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc],
                                            {
                                                input_data: train_batch,
                                                labels: train_label_batch
                                            })

            # write summaries to TensorBoard every 1000 steps
            if i % 1000 == 0:
                print("Step %d, train loss = %.2f, train accuracy = %.2f%%" %
                      (i, tra_loss, tra_acc))

                test_batch, test_label_batch = input_text_data.getTestBatch()
                summary = sess.run(merged, {
                    input_data: test_batch,
                    labels: test_label_batch
                })
                writer.add_summary(summary, i)
                test_loss, test_acc = sess.run([train_loss, train_acc], {
                    input_data: test_batch,
                    labels: test_label_batch
                })
                print(
                    "*************, test loss = %.2f, test accuracy = %.2f%%" %
                    (test_loss, test_acc))

            if i % 2000 == 0 or (i + 1) == MAX_STEP:
                save_path = saver.save(sess,
                                       "./models/pretrained_CNN.ckpt",
                                       global_step=i)
                print("saved to %s" % save_path)
        writer.close()
Example #3
def run_training():
    # directories
    train_dir="./image/"
    logs_train_dir ="./log"

    train,train_label=image_P.get_files(train_dir)
    train_batch,train_label_batch=image_P.get_batch(train,
                                            train_label,
                                            IMG_W,
                                            IMG_H,
                                            BATCH_SIZE,
                                            CAPACITY)
    train_logits=model.inference(train_batch,BATCH_SIZE,N_CLASSES)
    train_loss=model.losses(train_logits,train_label_batch)
    train_op=model.trainning(train_loss,learning_rate)
    train_acc=model.evaluation(train_logits,train_label_batch)

    summary_op=tf.summary.merge_all()

    sess=tf.Session()
    train_writer=tf.summary.FileWriter(logs_train_dir,sess.graph)
    # saver to checkpoint the model
    saver=tf.train.Saver()

    # initialize variables
    sess.run(tf.global_variables_initializer())

    # start queue-runner threads
    coord=tf.train.Coordinator()
    threads=tf.train.start_queue_runners(sess=sess,coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break  # stop when the coordinator requests it
            _,temp_loss,temp_acc=sess.run([train_op,train_loss,train_acc])
            
            # print the results every 50 steps
            if step%50 == 0:
                print('Step %d,train loss = %.2f,train accuracy = %.2f'%(step,temp_loss,temp_acc))
                summary_str=sess.run(summary_op)
                train_writer.add_summary(summary_str,step)
            
            # save the model every 200 steps and at the final step
            if step%200 == 0 or (step+1) == MAX_STEP:
                checkpoint_path=os.path.join(logs_train_dir,'model.ckpt')
                saver.save(sess,checkpoint_path,global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- input queue exhausted')
    finally:
        coord.request_stop()

    # wait for the threads to finish
    coord.join(threads)

    sess.close()
Example #4
def CreateModel(model_name, bit, use_gpu):
    if model_name == 'vgg11':
        vgg11 = models.vgg11(pretrained=True)
        cnn_model = CNN_model.cnn_model(vgg11, model_name, bit)
    if model_name == 'alexnet':
        alexnet = models.alexnet(pretrained=True)
        cnn_model = CNN_model.cnn_model(alexnet, model_name, bit)
    if use_gpu:
        cnn_model = cnn_model.cuda()
    return cnn_model
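A minimal usage sketch for the CreateModel helper above; the 'alexnet' choice, the 48-bit code length and the CUDA check are illustrative assumptions, not part of the original example.

import torch

# hypothetical call: AlexNet backbone, 48-bit output, GPU only if one is available
cnn_model = CreateModel('alexnet', bit=48, use_gpu=torch.cuda.is_available())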
Example #5
def train():
	with (tf.Graph().as_default()):
		global_step = tf.contrib.framework.get_or_create_global_step()

		images, display_images, labels = CNN_input.construct_inputs(False, True)

		logits = CNN_model.inference(True, images)

		loss = CNN_model.loss(logits, labels)

		train_op = CNN_model.train(loss, global_step)

		# Logs loss and runtime
		class _LoggerHook(tf.train.SessionRunHook):

			def begin(self):
				self._step = -1
				self._estimated_time_array = []
				for i in range(EST_TIME_SMOOTHING):
					self._estimated_time_array.append(0)

			def before_run(self, run_context):
				self._step += 1
				self._start_time = time.time()
				return tf.train.SessionRunArgs(loss)

			def get_estimated_completion(self):
				total = 0
				for i in range(EST_TIME_SMOOTHING):
					total += self._estimated_time_array[i]
				return (total / EST_TIME_SMOOTHING)

			def after_run(self, run_context, run_values):
				duration = time.time() - self._start_time
				loss_value = run_values.results

				self._estimated_time_array[self._step % EST_TIME_SMOOTHING] = float(duration * (FLAGS.max_steps - self._step)) / (60.0 * 60.0)

				if (self._step % LOGGING_STEP_INTERVAL == 0):
					log_value = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch; %.2f est. hours to completion)' \
								% (datetime.now(), self._step, loss_value, FLAGS.batch_size / duration, float(duration), self.get_estimated_completion())
					print(log_value)
					return (log_value)

		with tf.train.MonitoredTrainingSession(
			checkpoint_dir=FLAGS.train_dir,
			hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
				   tf.train.NanTensorHook(loss),
				   _LoggerHook()],
			config=tf.ConfigProto(
				log_device_placement=FLAGS.log_device_placement)) as mon_sess:
			while (not mon_sess.should_stop()):
				mon_sess.run(train_op)
Example #6
def get_cnn_model(model_name, bits):
    if model_name == 'vgg11':
        vgg11 = models.vgg11(pretrained=True)
        cnn_model = CNN_model.cnn_model(vgg11, model_name, bits)
    if model_name == 'alexnet':
        alexnet = models.alexnet(pretrained=True)
        cnn_model = CNN_model.cnn_model(alexnet, model_name, bits)
    if model_name == 'resnet':
        cnn_model = CNN_model.getResnetModel(bits)
    if torch.cuda.is_available():
        cnn_model = cnn_model.cuda()
    return cnn_model
Example #7
def run_training():
    # log dir
    logs_train_dir = logs_dir

    # read data
    test, train, validate, test_label, train_label, validate_label = rd.read_file(
    )

    # get batch
    train_batch, train_label_batch = rd.get_batch(train, train_label, IMG_W,
                                                  IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #8
def evaluate_all_image():
    '''
    Test all images against the saved model and parameters.
    Returns the global accuracy over the test image set.
    ##############################################
    ## Note that the test images must have labels so predictions can be compared with the ground truth
    ##############################################
    '''
    # you need to change the directories to yours.
    # test_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/test/'
    # N_CLASSES = 2
    print('-------------------------')
    test, train, validate, test_label, train_label, validate_label = rd.read_file(
    )
    BATCH_SIZE = len(test)
    print('There are %d test images in total.' % BATCH_SIZE)
    print('-------------------------')
    test_batch, test_label_batch = rd.get_batch(test, test_label, IMG_W, IMG_H,
                                                BATCH_SIZE, CAPACITY)

    logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES)
    testloss = model.losses(logits, test_label_batch)
    testacc = model.evaluation(logits, test_label_batch)

    logs_train_dir = logs_dir
    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
        print('-------------------------')
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        test_loss, test_acc = sess.run([testloss, testacc])
        print('The model\'s loss is %.2f' % test_loss)
        correct = int(BATCH_SIZE * test_acc)
        print('Correct : %d' % correct)
        print('Wrong : %d' % (BATCH_SIZE - correct))
        print('The accuracy on the test images is %.2f%%' % (test_acc * 100.0))
        # stop and join the queue-runner threads before the session is closed
        coord.request_stop()
        coord.join(threads)
Example #9
def find_model(args):
    if args["model"] == "cnn":
        model = CNN_model.CNN_Sentence(args)
    elif args["model"] == "gru":
        model = GRU_model.GRU_Sentence(args)
    elif args["model"] == "gru_attention":
        model = GRU_att_model.AttentionWordRNN(args)
    elif args["model"] == "gru_domain":
        model = GRU_domain_att_model.GRU_Attention_Sentence(args)
    elif args["model"] == "gru_random":
        model = GRU_ramdom_att_model.GRU_random_att_Sentence(args)
    elif args["model"] == "cnn_attention":
        model = CNN_Attention_model.CNN_Attention_Sentence(args)
    elif args["model"] == "cnn_att_pool":
        model = CNN_Att_Pool_model.CNN_Att_Pool_Sentence(args)
    elif args["model"] == "dam":
        model = DAM_model.DAM_Sentence(args)
    elif args["model"] == "gs":
        model = GS_model.GS_Sentence(args)
    else:
        print("No such model!")
        exit()
    if torch.cuda.is_available():
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args["GPU"])
        model = model.cuda(args["GPU"])
    return model
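A hypothetical call to find_model; only the "model" and "GPU" keys are read by find_model itself, and whatever extra keys the chosen model class expects are omitted here.

# hypothetical arguments dict; the selected class may require additional keys
args = {"model": "cnn", "GPU": 0}
model = find_model(args)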
Example #10
def test():
    images_dir='./image/image/'
    log_dir='./log/'

    images_cat=open("imagelist.txt")
    # store the network output for every listed image
    images_tested=[]

    with tf.Graph().as_default():
        # rebuild the model and restore the checkpoint
        x=tf.placeholder(tf.float32,shape=[1,300,400,3])
        p=model.inference(x,1,10)
        logits=tf.nn.softmax(p)

        sess=tf.Session()
        tf.get_variable_scope().reuse_variables()
        ckpt=tf.train.get_checkpoint_state(log_dir)
        saver=tf.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success!')
        else:
            print('Loading failed!')
        for line in images_cat.readlines():
            image_name=line.strip('\n')
            image_array=get_one_image(images_dir+image_name)
            image_array=np.reshape(image_array,[1,300,400,3])                 
  
            prediction=sess.run(logits,feed_dict={x:image_array})
            prediction=np.array(prediction,dtype='float32')
            images_tested.append([image_name,prediction])

            print(image_name)
            print(prediction)
        
        # query with a single test image
        while (True):
            test_file=input('Enter a test image: ')
            if(test_file=='z'):
                break

            image_name=test_file
            image_array=get_one_image(images_dir+image_name)
            image_array=np.reshape(image_array,[1,300,400,3])
            prediction=sess.run(logits,feed_dict={x:image_array})
            prediction=np.array(prediction,dtype='float32')
            test_result=[]
            for sample in images_tested:
                distance=np.sqrt(np.sum(np.square(sample[1]-prediction)))
                distance=distance.astype('float32')
                test_result.append([sample[0],distance])
                                
            # sort the results by distance
            test_result=np.array(test_result)
            test_result=test_result[np.lexsort(test_result.T)]
            for i in range(10):
                print(test_result[i][0])

            images_show(test_result)
Example #11
def CreateModel(model_name, bit, use_gpu):
    if model_name == 'vgg11':
        vgg11 = models.vgg11(pretrained=True)
        cnn_model = CNN_model.cnn_model(vgg11, model_name, bit)
    if model_name == 'alexnet':
        alexnet = models.alexnet(pretrained=True)
        cnn_model = CNN_model.cnn_model(alexnet, model_name, bit)
    if model_name == 'resnet34':
        resnet = models.resnet34(pretrained=True)
        cnn_model = CNN_model.cnn_model(resnet, model_name, bit)
    if model_name == 'resnet50':
        resnet = models.resnet50(pretrained=True)
        cnn_model = CNN_model.cnn_model(resnet, model_name, bit)
    if model_name == 'resnet34resnet34':
        resnet = models.resnet34(pretrained=True)
        cnn_model = CNN_model.cnn_model(resnet, model_name, bit)

    return cnn_model
Example #12
def grab_image():
	image_base64 = request.values['imageBase64']
	image_data = base64.decodebytes(re.sub('^data:image/.+;base64,', '', image_base64).encode())
	image = Image.open(io.BytesIO(image_data))

	# resize to 28x28, remove transparency and convert to grayscale
	#image = image.resize((28,28))
	image = remove_transparency(image).convert('L')

	image = np.asarray(image)
	#mnist dataset contains white numbers with dark backgrounds, so an inversion is necessary
	image = np.invert(image)
	image = cv2.resize(image, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)
	image = image.reshape(28,28,1)
	image = np.expand_dims(image, axis=0) #(1,28,28,1) to match model's input
	image = image.astype('float32')
	prediction = CNN_model.predict(image)
	CNN_model.clear_session()
	# im = Image.fromarray(image)
	# im.save("image.png")
	return str(prediction)
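The remove_transparency helper used above is not shown in this example; a minimal sketch of one possible Pillow implementation (an assumption, not the original code):

from PIL import Image

def remove_transparency(image, bg_color=(255, 255, 255)):
    # composite images that carry an alpha channel onto an opaque background
    if image.mode in ('RGBA', 'LA') or (image.mode == 'P' and 'transparency' in image.info):
        alpha = image.convert('RGBA').split()[-1]
        background = Image.new('RGBA', image.size, bg_color + (255,))
        background.paste(image, mask=alpha)
        return background.convert('RGB')
    return image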
Example #13
def CreateModel(model_name, bit, use_gpu):
    if model_name == 'vgg16':
        vgg16 = models.vgg16(pretrained=True)
        #vgg16 = torch.load('/home/zhangjingyi/Rescode/vgg16_caffe2pytorch/vgg16_20M.pkl')
        cnn_model = CNN_model.cnn_model(vgg16, model_name, bit)
    print('**********************************')
    print('Definition of pool4~pool5:')
    for i in [23, 24, 26, 28, 30]:
        print(cnn_model.features[i])
    print('**********************************')
    if use_gpu:
        cnn_model = torch.nn.DataParallel(cnn_model).cuda()
        #cnn_model = cnn_model.cuda()
    return cnn_model
Example #14
def main(argv=None):
    CNN_input.extract()

    images, display_images, labels = CNN_input.construct_inputs(True, True)
    logits = CNN_model.inference(False, images)

    variable_averages = tf.train.ExponentialMovingAverage(
        CNN_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        check_point = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if check_point and check_point.model_checkpoint_path:
            saver.restore(
                sess,
                os.path.join(
                    FLAGS.train_dir,
                    list(
                        reversed(
                            check_point.model_checkpoint_path.split('\\',
                                                                    -1)))[0]))
        else:
            raise IOError("No Checkpoint file found!")

        coord = tf.train.Coordinator()
        threads = []
        try:
            for queue_runner in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    queue_runner.create_threads(sess,
                                                coord=coord,
                                                daemon=True,
                                                start=True))

            session_result = sess.run(
                [tf.nn.softmax(logits), labels, display_images])

            ResultsPlot(session_result[2], session_result[1],
                        session_result[0])

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)
        except Exception as e:
            print(e)
            coord.request_stop()
Example #15
File: UI.py  Project: LC-John/AQI-CNN
 def __init__(self):
     '''
     Initialize
     '''
     self.fname = None
     self.imgs = None
     self.model = CNN_model.CNN()
     self.patch_size = (32, 32)
     self.app = QtWidgets.QApplication(sys.argv)
     self.MainWindow = QtWidgets.QMainWindow()
     self.ui = mainwindow.Ui_MainWindow()
     self.ui.setupUi(self.MainWindow)
     self.MainWindow.setFixedSize(self.MainWindow.width(), self.MainWindow.height())
     self.ui.image_label.setScaledContents(True)
     self.ui.image_label.setMargin(5)
     self.ui.path_button.clicked.connect(self.path_button_click)
     self.ui.patch_button.clicked.connect(self.patch_button_click)
     #self.ui.patch_button.clicked.connect(self.simple_patch_button_click)
     self.ui.go_button.clicked.connect(self.go_button_click)
Example #16
def load_test_pred(video_path, gt_path, model_path):
    feature_size = 512 * 7 * 7

    CNN_pre_model = torchvision.models.vgg16(pretrained=True).features
    model = CNN_model.CNN_model(feature_size)
    # GPU enable
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Device used:', device)
    if torch.cuda.is_available():
        CNN_pre_model = CNN_pre_model.to(device)
        model = model.to(device)
    load_checkpoint(model_path,model)
    CNN_pre_model.eval()

    # -> label loading
    test_label = pd.read_csv(gt_path)["Action_labels"]

    test_features = []
    category_path = sorted(os.listdir(video_path))
    with torch.no_grad():
        for category in category_path:
            mask = pd.read_csv(gt_path)["Video_category"] == category
            test_name = pd.read_csv(gt_path)[mask]["Video_name"]
            for i,video_name in enumerate(test_name):
                print("\r%d/%d" %(i,len(test_name)),end="")
                frames = readShortVideo(video_path,category, video_name,downsample_factor=12, rescale_factor=1)
                frames = Variable(torch.from_numpy(frames)).to(device)
                tmp = CNN_pre_model(frames).cpu().view(-1, feature_size)
                test_features.append(torch.mean(tmp,0).numpy())
            print("")
            print("Processing [%s] finished!"%(category))
            print("Pre-train finished!")

    test_features = torch.Tensor(test_features)

    model.eval()
    feature = Variable(test_features).to(device)
    output = model(feature)
    pred = torch.argmax(output,1).cpu()
    print(pred.shape)
    return test_features,pred ,test_label
Example #17
def transfer(input_image, CNN_path, output_image, batchsize=1):
    input_image = cv2.resize(input_image, (256, 256))
    input_image = np.array(input_image)
    with tf.Session() as sess:
        batch_shape = (batchsize, ) + input_image.shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = CNN_model.net(img_placeholder)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(CNN_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception('No such CNN_path')

        input_image = input_image[np.newaxis, ...]
        output = sess.run(preds, feed_dict={img_placeholder: input_image})
        cv2.imwrite(output_image, output[0].astype(np.int))
Example #18
def evaluate(run_once):
    with tf.Graph().as_default() as graph:
        images, display_images, labels = CNN_input.construct_inputs(
            True, False)

        logits = CNN_model.inference(False, images)

        top_k_op = tf.nn.in_top_k(logits, labels, 3)

        variable_averages = tf.train.ExponentialMovingAverage(
            CNN_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, graph)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #19
def transfer(input_image, CNN_path):

    input_image = np.array(input_image) / 255.0
    tf.reset_default_graph()
    with tf.Session() as sess:
        batch_shape = (1, ) + input_image.shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = CNN_model.net(img_placeholder)
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(CNN_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception('No such CNN_path')

        input_image_4d = input_image[np.newaxis, ...]
        output = sess.run(preds, feed_dict={img_placeholder: input_image_4d})
        out_img = np.clip(output[0], 0, 255).astype(np.int)
        return out_img[0:input_image.shape[0], 0:input_image.shape[1], ...]
Example #20
epoch_num = 90
batch_size = 16
max_length = 64
best_a_ccc = 0
best_v_ccc = 0
best_ccc = 0
best_epoch = 0
best_a_epoch = 0
best_v_epoch = 0
# parameter
hidden_size = 256
timesteps = 64
attention_size = 32
filters = 64

model = CN.model(timesteps, 512, hidden_size, filters, ac='sigmoid+tanh')
# model=R.model(timesteps,512,hidden_size)
# optimizer = Adam(lr=0.005, beta_1=0.5, beta_2=0.95, epsilon=1e-08)
optimizer = SGD(lr=0.005, momentum=0.5, nesterov=True, decay=0.001)
model.compile(loss='mae', optimizer=optimizer)

# # circle training
# model.load_weights('models/CNN_weights12.h5')

n_batch = int(len(X_train) / batch_size)
for epoch in range(epoch_num):
    # training step
    index = np.arange(len(X_train))
    np.random.shuffle(index)
    X_train = X_train[index]
    y_train = y_train[index, :]
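The example is cut off after the per-epoch shuffle; one hypothetical continuation of the epoch loop, stepping through mini-batches with the Keras train_on_batch API (an assumed continuation, not the original code):

    # hypothetical mini-batch loop inside the epoch shown above
    for b in range(n_batch):
        x_batch = X_train[b * batch_size:(b + 1) * batch_size]
        y_batch = y_train[b * batch_size:(b + 1) * batch_size]
        loss = model.train_on_batch(x_batch, y_batch)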
Example #21
DataParams = collections.namedtuple('DataParams', [
    'image_height', 'image_width', 'image_channel', 'char_length',
    'num_char_class', 'batch_size'
])

data_params = DataParams(image_height=60,
                         image_width=160,
                         image_channel=1,
                         char_length=5,
                         num_char_class=10,
                         batch_size=50)

tf.reset_default_graph()

mymodel = CNN_model.ConvModel(data_params=data_params)
mymodel.build_model()

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./log/train', sess.graph)

    for i in range(500):

        imgs, lbs = giter.next_batch()
        myfeed_dict = {mymodel.inputs: imgs, mymodel.labels: lbs}
        _, loss, char_acc, str_acc, summary = sess.run([
            mymodel.train_op, mymodel.loss, mymodel.char_acc, mymodel.str_acc,
            merged
        ], feed_dict=myfeed_dict)
Example #22
from __future__ import print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# import the CNN module
import CNN_model
model = CNN_model.CNNmodel()
xs = model.xs
keep_prob = model.keep_prob

# restore the saved trained model
import global_variable
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, global_variable.save_path)

# evaluate test accuracy in chunks
TestSize = 2
accuracyTotal = np.zeros((TestSize, 1), dtype=np.float32)
for j in range(TestSize):
    batch_xs, batch_ys = mnist.test.next_batch(int(10000 / TestSize))
    accuracyTotal[j] = sess.run(model.compute_accuracy(batch_ys),
                                feed_dict={
                                    xs: batch_xs,
                                    keep_prob: 1
                                })
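The example above only fills accuracyTotal chunk by chunk; a short follow-up (not in the original) would average it into an overall test accuracy:

# average the per-chunk accuracies over the whole MNIST test set
print('overall test accuracy: %.4f' % float(np.mean(accuracyTotal)))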
Example #23
def test(log_dir):
    images_dir = './image/'

    images_cat = open("imagelist.txt")
    # store the network output for every listed image
    images_tested = []

    with tf.Graph().as_default():
        # rebuild the model and restore the checkpoint
        x = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
        p = model.inference(x, 1, 10)
        logits = tf.nn.softmax(p)

        sess = tf.Session()
        tf.get_variable_scope().reuse_variables()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        saver = tf.train.Saver()
        # restore the trained weights; without this the variables stay uninitialized
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success!')
        else:
            print('Loading failed!')

        num_photos = 0
        outfile = open("test.txt", "w")

        for line in images_cat.readlines():
            image_name = line.strip('\n')
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])

            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            images_tested.append([image_name, prediction])

            num_photos += 1
            print(num_photos)

            outfile.writelines(image_name)
            t = str(prediction)
            outfile.writelines(t)

        # query with a single test image
        while (True):
            test_file = input('Enter a test image: ')
            if (test_file == 'z'):
                break

            image_name = test_file
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])
            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            test_result = []
            for sample in images_tested:
                distance = np.sqrt(np.sum(np.square(sample[1] - prediction)))
                distance = distance.astype('float32')
                test_result.append([sample[0], distance])

            # sort the results by distance
            test_result = np.array(test_result)
            test_result = test_result[np.lexsort(test_result.T)]
            for i in range(11):
                print(test_result[i][0])

            images_show(test_result)
Example #24
if not os.path.exists(root_dir): os.makedirs(root_dir)

checkpoint_dir = os.path.join(root_dir, version +
                              '_train')  #os.path.dirname(checkpoint_path)
if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir)
checkpoint_path = os.path.join(checkpoint_dir, 'cp-{epoch:04d}.ckpt')

cp_callback = [
    tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                       verbose=0,
                                       save_weights_only=True,
                                       period=5),
    tf.keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")
]

model, model_archi = CNN_model.CNN(width, height, depth, len(genres))
model.save_weights(checkpoint_path.format(epoch=0))

history = model.fit(
    train_images,
    train_labels,
    epochs=epochs,
    callbacks=cp_callback,
    validation_data=(test_images, test_labels),
    #validation_split=0.2,
    shuffle=True,
    batch_size=batch_size,
    verbose=1)

#latest = tf.train.latest_checkpoint(checkpoint_dir)
# Save the weights
Example #25
@author: Administrator
"""
import tensorflow as tf
import random
import numpy as np
import DataManager as dm, Logger, CNN_model

log = Logger.get_logger("Model_1", "./log/Model_1.log")
weight_path = './weight/Model_1/Model_1.ckpt'

# tf Graph input
X = tf.compat.v1.placeholder("float", [None, 64 * 64])
Y = tf.compat.v1.placeholder("float", [None, 150])

#use convolutional neural network to extract features
feature_layer = CNN_model.CNN(X, Y)

#use fully connected neural network to classify
classification_layer = CNN_model.full_connected_layer(feature_layer)

# loss_softmax is the only loss function used in model 1
loss_softmax = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=classification_layer,
                                               labels=Y))
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001, epsilon=1)
train_op = optimizer.minimize(loss_softmax)

# TensorBoard
tf.compat.v1.summary.scalar("loss", loss_softmax)
merged_summary_op = tf.compat.v1.summary.merge_all()
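The example stops once the graph, loss, optimizer and summaries are defined; a minimal hypothetical driver loop over this graph (dm.next_batch and the batch size of 64 are assumptions, not shown in the original) might look like:

# hypothetical training loop; dm.next_batch is an assumed DataManager API
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    summary_writer = tf.compat.v1.summary.FileWriter('./log/Model_1', sess.graph)
    for step in range(1000):
        batch_x, batch_y = dm.next_batch(64)
        _, loss_val, summary = sess.run(
            [train_op, loss_softmax, merged_summary_op],
            feed_dict={X: batch_x, Y: batch_y})
        if step % 100 == 0:
            summary_writer.add_summary(summary, step)
            log.info('step %d, softmax loss %.4f', step, loss_val)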
Example #26
def main():
    # Initialize Model
    model = CNN_model.fnBuildModel()

    # If the user passes a single image, run a quick sanity-check fit on it
    if args.test is not None:
        print("---------- In Testing mode")
        image = cv2.imread(args.test)
        image = fu.preprocessing(image)
        image = np.expand_dims(image, axis=0)
        y = np.expand_dims(np.asarray([0]), axis=0)
        BatchSize = 1
        model.fit(image, y, epochs=400, \
                batch_size=BatchSize, \
                validation_split=0.1, \
                shuffle=True, verbose=0)
        return

    X_filename = 'X_train.npy'
    Y_filename = 'Y_train.npy'
    X_train = np.load(X_filename)
    Y_train = np.load(Y_filename)
    print(X_train.shape)
    print(Y_train.shape)

    print("Training started...........")

    arrCallbacks = []
    earlystop_callback = EarlyStopping(monitor='val_loss',
                                       patience=5,
                                       verbose=0)
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch, logs: batch)
    epoch_print_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: epoch)
    tensorboard = TensorBoard(log_dir='./Graphfinal2',
                              histogram_freq=0,
                              write_graph=True,
                              write_images=True)
    arrCallbacks.append(earlystop_callback)
    arrCallbacks.append(batch_print_callback)
    arrCallbacks.append(epoch_print_callback)
    arrCallbacks.append(tensorboard)

    BatchSize = 50
    hist =  model.fit(X_train, Y_train, epochs=500, \
            batch_size=BatchSize, \
            validation_split=0.3, \
            shuffle=True, verbose=0, \
            callbacks=arrCallbacks)

    model.save_weights('my_model_weights.h5')
    #scores = model.evaluate(X_train, Y_train, verbose=0)
    # model result:
    train_val_accuracy = hist.history
    # Get and print training accuracy
    train_accuracy = train_val_accuracy['acc']
    # Get and print validation accuracy
    val_accuracy = train_val_accuracy['val_acc']
    print("Done!")
    print("Train acc: %.3f" % train_accuracy[-1])
    print("Validation acc: %.3f" % val_accuracy[-1])

    #print ("Train loss : %.3f" % scores[0])
    #print ("Train accuracy : %.3f" % scores[1])
    print("Training finished")
Example #27
valid_data_lbls = [item[1] for item in valid_data]
# create arrays for use in the models
X_train = np.array(train_data_imgs).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
Y_train = train_data_lbls
x_valid = np.array(valid_data_imgs).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_valid = valid_data_lbls

# Image augmentation: flip along the vertical axis and random rotations (max 25 degrees)
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
'''             Model execution (training or loading)                '''
# model = CNN_model.cnn(img_size=IMG_SIZE, lr=lr)                                       # basic CNN
# model = CNN_model.resnet(img_size=IMG_SIZE, lr=lr, n=2)                               # simple resnet
model = CNN_model.conv_res_integrated(
    img_size=IMG_SIZE, lr=lr, n=1,
    img_aug=img_aug)  # integrated CNN with res layers

# Ask the user whether to load an existing model or train a new one.
# The first run must train; afterwards the saved model can be loaded,
# UNLESS any parameters have been changed.
print(
    'Would you like to load pre-existing trained model (L) or train a new one (T)?'
)
decision2 = input()
if decision2 == 'T':
    model.fit(X_train,
              Y_train,
              n_epoch=epochs,
              validation_set=(x_valid, y_valid),
              snapshot_step=500,
Example #28
def main():
    # parameters
    feature_size = 512 * 7 * 7
    learning_rate = 0.0001
    num_epochs = 51
    batch_size = 64

    # create the save log file
    print("Create the directory")
    if not os.path.exists("./save"):
        os.makedirs("./save")
    if not os.path.exists("./logfile"):
        os.makedirs("./logfile")
    if not os.path.exists("./logfile/CNN"):
        os.makedirs("./logfile/CNN")

    # load my Dataset
    train_dataset = CNN_Dataset.CNN_Dataset(mode="train")
    test_dataset = CNN_Dataset.CNN_Dataset(mode="valid")

    print('the train_dataset has %d samples.' % (len(train_dataset.data)))
    print('the valid_dataset has %d samples.' % (len(test_dataset.data)))

    # Pre-train models
    CNN_pre_model = torchvision.models.vgg16(pretrained=True).features
    model = CNN_model.CNN_model(feature_size)

    # GPU enable
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Device used:', device)
    if torch.cuda.is_available():
        CNN_pre_model = CNN_pre_model.to(device)
        model = model.to(device)

    ##################################################################
    #########        pre-train model vgg16
    ##################################################################
    # train_dataset = (num of data ,data/label ,frames ,3 ,224 ,224)

    CNN_pre_model.eval()
    train_features = []
    with torch.no_grad():
        for i in range(len(train_dataset.data)):
            print("\r%d/%d" % (i, len(train_dataset.data)), end="")
            input = train_dataset[i][0]
            input = input.to(device)
            tmp = CNN_pre_model(input).cpu().view(-1, feature_size)
            train_features.append(torch.mean(tmp, 0).numpy())

        print(" Pre-train train_data finished!")

    test_features = []
    with torch.no_grad():
        for i in range(len(test_dataset.data)):
            print("\r%d/%d" % (i, len(test_dataset.data)), end="")
            input = test_dataset[i][0]
            input = input.to(device)
            tmp = CNN_pre_model(input).cpu().view(-1, feature_size)
            test_features.append(torch.mean(tmp, 0).numpy())

        print(" Pre-train test_data finished!")

    # update dataset
    train_features = torch.Tensor(train_features)
    train_label = torch.LongTensor(train_dataset.label)
    train_features_dataset = data.TensorDataset(train_features, train_label)
    test_features = torch.Tensor(test_features)
    test_label = torch.LongTensor(test_dataset.label)
    test_features_dataset = data.TensorDataset(test_features, test_label)

    train_loader = DataLoader(train_features_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8)
    test_loader = DataLoader(test_features_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=8)

    # setup optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           betas=(0.5, 0.999))

    criterion = nn.CrossEntropyLoss()

    train_loss_list = []
    train_acc_list = []
    test_loss_list = []
    test_acc_list = []

    print("Starting training...")
    best_accuracy = -np.inf
    for epoch in range(num_epochs):
        start = time.time()
        model.train()
        print("Epoch:", epoch + 1)
        epoch_train_loss = 0.0
        train_acc = 0.0

        if (epoch + 1) == 11:
            optimizer.param_groups[0]['lr'] /= 2

        if (epoch + 1) == 20:
            optimizer.param_groups[0]['lr'] /= 2

        for i, (feature, label) in enumerate(train_loader):
            feature = Variable(feature).to(device)
            label = Variable(label).to(device)
            optimizer.zero_grad()
            output = model(feature)
            train_loss = criterion(output, label)
            train_loss.backward()
            optimizer.step()

            epoch_train_loss += train_loss.item()

            # Accuracy
            output_label = torch.argmax(output, 1).cpu()

            acc = np.mean((output_label == label.cpu()).numpy())
            train_acc += acc
            print('Epoch [%d/%d], Iter [%d/%d] loss %.4f,Acc %.4f, LR = %.6f' %
                  (epoch, num_epochs, i + 1, len(train_loader),
                   train_loss.item(), acc, optimizer.param_groups[0]['lr']))

        if (epoch) % 10 == 0:
            save_checkpoint('./save/CNN-%03i.pth' % (epoch), model, optimizer)

        # testing
        with torch.no_grad():

            model.eval()
            epoch_test_loss = 0.0
            test_acc = 0.
            for i, (feature, label) in enumerate(test_loader):
                feature = Variable(feature).to(device)
                label = Variable(label).to(device)
                output = model(feature)
                test_loss = criterion(output, label)
                predict = torch.argmax(output, 1).cpu()
                acc = np.mean((predict == label.cpu()).numpy())
                epoch_test_loss += test_loss.item()
                test_acc += acc

        print(
            '\n============\nEpoch [%d/%d] ,Train: Loss: %.4f | Acc: %.4f ,Validation: loss: %.4f | Acc: %.4f'
            % (epoch, num_epochs, epoch_train_loss / len(train_loader),
               train_acc / len(train_loader), epoch_test_loss /
               len(test_loader), test_acc / len(test_loader)))

        # save loss data
        train_loss_list.append(epoch_train_loss / len(train_loader))
        train_acc_list.append(train_acc / len(train_loader))
        test_loss_list.append(epoch_test_loss / len(test_loader))
        test_acc_list.append(test_acc / len(test_loader))

        if (test_acc / len(test_loader) > best_accuracy):
            best_accuracy = test_acc / len(test_loader)
            save_checkpoint('./save/CNN-%03i.pth' % (epoch), model, optimizer)
            print('Saved best model, test_acc = %.6f' % best_accuracy)

        print('-' * 88)

    with open('./logfile/CNN/train_loss.pkl', 'wb') as f:
        pickle.dump(train_loss_list, f)
    with open('./logfile/CNN/train_acc.pkl', 'wb') as f:
        pickle.dump(train_acc_list, f)
    with open('./logfile/CNN/test_loss.pkl', 'wb') as f:
        pickle.dump(test_loss_list, f)
    with open('./logfile/CNN/test_acc.pkl', 'wb') as f:
        pickle.dump(test_acc_list, f)
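The four pickled curves saved above can be inspected later; a small hypothetical plotting sketch (matplotlib is an assumption, it is not used in the original):

import pickle
import matplotlib.pyplot as plt

# load and plot the training/validation loss curves written by main()
with open('./logfile/CNN/train_loss.pkl', 'rb') as f:
    train_loss = pickle.load(f)
with open('./logfile/CNN/test_loss.pkl', 'rb') as f:
    valid_loss = pickle.load(f)
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.legend()
plt.savefig('./logfile/CNN/loss_curves.png')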
Example #29
        #    test_file_name = "../MSdata/MGF/artificialData3"
        test_spectra_loc = data.inspect_file_location("mgf", test_file_name)
        with open(test_file_name, "r") as test_file:
            test_data, _ = data.read_random_stack_cnn(
                test_file, "mgf", test_spectra_loc, data_utils.test_stack_size)
    #    test_bucket_sizes =

    # Training Data
#        file_name = "../MSdata/MGF/peaks.db.10k.yeast.mgf"
#    file_name = "../MSdata/MGF/excludeYeast_cross.cat.mgf.train.repeat"
        file_name = "../MSdata/MGF/artificialData2sorted"
        spectra_locations = data.inspect_file_location("mgf", file_name)
        file_handle = open(file_name, "r")

        # Model
        my_model = CNN_model.cnnModel(test_sess)
        test_sess.run(tf.global_variables_initializer())

        # epochs
        epochs = 50  #200
        epoch_loss = []
        for epoch in range(epochs):

            dataset, dataset_len = data.read_random_stack_cnn(
                file_handle, "mgf", spectra_locations,
                data_utils.train_stack_size)

            training_set_size = dataset_len

            spectra_used = 0
Example #30
def run_training():
    train_dir = "./image/"
    logs_train_dir = "./log_" + str(MAX_STEP) + "_cap_" + str(CAPACITY)

    train, train_label = image_P.get_files(train_dir)
    train_batch, train_label_batch = image_P.get_batch(train, train_label,
                                                       IMG_W, IMG_H,
                                                       BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()

    config = tf.ConfigProto(allow_soft_placement=True,
                            gpu_options=tf.GPUOptions(
                                per_process_gpu_memory_fraction=0.7,
                                allow_growth=True))

    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    tf.train.start_queue_runners(sess=sess)

    for step in np.arange(9999999):
        sess.run([train_op, train_loss, train_acc])

        print(step)

        if step == MAX_STEP:
            print("Finished")
            break

    log_dir = train_dir

    images_dir = './image/'

    images_cat = open("imagelist.txt")
    # store the network output for every listed image
    images_tested = []

    num_photos = 0
    outfile = open("test-new-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt",
                   "w")

    filelines = images_cat.readlines()
    for line in filelines:
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])

        # NOTE: train_logits comes from the training input queue, so this placeholder
        # feed does not influence the fetched prediction
        xName = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
        prediction = sess.run(train_logits, feed_dict={xName: image_array})
        prediction = np.array(prediction, dtype='float32')
        images_tested.append([image_name, prediction])

        num_photos += 1
        print("Test:" + str(num_photos))

        outfile.writelines(image_name)
        t = str(prediction)
        outfile.writelines(t)
        outfile.close()
        outfile = open(
            "test-new-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "a")

    outfile.close()
    outfile2 = open(
        "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "w")
    outfile2.write("result = {\n")
    outfile2.close()
    num_photos = 0
    for line in filelines:
        num_photos += 1
        print("Find Near:" + str(num_photos))
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])
        outfile2 = open(
            "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "a")
        outfile2.write("'" + image_name + "': [\n")
        xName = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
        prediction = sess.run(train_logits, feed_dict={xName: image_array})
        prediction = np.array(prediction, dtype='float32')

        test_result = []
        for sample in images_tested:
            distance = np.sqrt(np.sum(np.square(sample[1] - prediction)))
            distance.astype('float32')
            test_result.append([sample[0], distance])

        # sort the results by distance
        test_result = np.array(test_result)
        test_result = test_result[np.lexsort(test_result.T)]
        for i in range(11):
            outfile2.write("'" + test_result[i][0] + "', ")
        outfile2.write("],\n")
        outfile2.close()

    outfile2 = open(
        "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "a")
    outfile2.write("}\n")
    outfile2.close()

    sess.close()