Exemplo n.º 1
0
# Per-epoch history lists for training/validation metrics, filled during the
# training loop below.
train_acc_values = []
train_cost_values = []
train_lr_values = []
train_recall_values = []
valid_acc_values = []
valid_cost_values = []
valid_recall_values = []

# Default session configuration (GPU/placement options could be set on it).
config = tf.ConfigProto()

# if we are freezing some layers adjust the steps per epoch since we will do one extra training step
# NOTE(review): assumes `freeze` and `steps_per_epoch` are defined earlier in
# the file — confirm.
if freeze:
    steps_per_epoch -= 1
## train the model
with tf.Session(graph=graph, config=config) as sess:
    if log_to_tensorboard:
        train_writer = tf.summary.FileWriter('./logs/tr_' + model_name, sess.graph)
        test_writer = tf.summary.FileWriter('./logs/te_' + model_name)
    
    # create the saver
    saver = tf.train.Saver()
    sess.run(tf.local_variables_initializer())

    # If the model is new initialize variables, else restore the session
    if init:
        sess.run(tf.global_variables_initializer())
        print("Initializing model...")
    else:
        # if we are initializing with the weights from another model load it
        if init_model is not None:
Exemplo n.º 2
0
def main():
	"""Train or evaluate a Q-network on CartPole-v0, depending on --run.

	With ``--run train`` the agent trains for 2000 episodes using an
	experience buffer and a target network copied every 5 episodes;
	otherwise a saved model is restored and run for one greedy episode.
	Every ``test_freq`` episodes the greedy policy is evaluated over
	``test_steps`` episodes; in training mode the results are written to
	``results/sarsa_nn_results.csv`` at the end.
	"""
	parser = argparse.ArgumentParser()
	parser.add_argument('-r', '--run', default='')
	args = parser.parse_args()
	if args.run == "train":
		print("Retraining model...")
		n_episodes = 2000
		max_eps_length = 300
		reload_model = False
		save_data = True
	else:
		print("Testing model")
		n_episodes = 1
		max_eps_length = 0
		reload_model = True
		save_data = False

	# Training parameters
	print_iters = 200
	test_steps = 50
	test_freq = 20
	avg_steps = 1
	episodes_counter = 0

	# Replay-buffer bounds and learning hyper-parameters.
	min_buffer_size = 128
	max_buffer_size = 1000
	gamma = 0.99
	alpha = 0.0001
	epsilon = 0.05

	##--------------------- Initialize environment and TF graph
	env = gym.make('CartPole-v0')
	mean_length_list, mean_return_list, loss_list = [], [], []

	# NOTE(review): init_vars/main_network/target_network/Buffer are defined
	# elsewhere in this file — their exact contracts are assumed here.
	state, action, td_target, reward, eps, rand_state, is_terminal = init_vars()
	opt, q, loss = main_network(state, action, td_target, alpha=alpha)
	target = target_network(state, reward, eps, rand_state, is_terminal)
	init = tf.global_variables_initializer()
	saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
	exp_buffer = Buffer(min_buffer_size, max_buffer_size, epsilon)
	copy_counter = 0

	with tf.Session() as sess:
		sess.run(init)

		for episode_i in range(n_episodes):
			t0 = time.time()
			end = False
			episode_return = 0
			copy_counter += 1
			s = env.reset()  # initial state
			return_list, length_list = [], []
			l_total = 0

			# NOTE(review): in test mode this re-imports the meta graph every
			# episode — harmless for n_episodes == 1, wasteful otherwise.
			if reload_model:
				print("Reloading model...")
				folder = '../../models/part1/sarsa'
				saver = tf.train.import_meta_graph(folder+'/tf_model.meta')
				saver.restore(sess, tf.train.latest_checkpoint(folder+'/'))
				all_vars = tf.get_collection('vars')

			#------------- Perform updates and sample experiences (epsilon-greedy)
			for t in range(max_eps_length):
				q_now = sess.run(q, feed_dict={state: s.reshape(1, 4)})
				a = epsilon_greedy(q_now, epsilon, env)
				s_prime, _, done, info = env.step(a)
				# Reward shaping: -1 on termination (pole fell), 0 otherwise.
				# `term` is 1 while the episode continues, 0 on the last step.
				if done:
					r = -1
					end = True
					term = 0
				else:
					r = 0
					term = 1

				# Update buffer and perform mini-batch Q update if enough examples
				exp_buffer.update(s, a, r, s_prime, term)
				if exp_buffer.check_size():
					opt_dict, targ_dict = exp_buffer.random_sample(state, action, reward, eps, rand_state, is_terminal)
					targ = sess.run(target, feed_dict=targ_dict)
					opt_dict[td_target] = targ
					_, l = sess.run([opt, loss], feed_dict=opt_dict)
					l_total += l

				# Update target network parameters
				if copy_counter == 5:
					train_vars = tf.trainable_variables()  # 1st 4 are main net, 2nd 4 are target net
					update_target_net(train_vars, sess)
					copy_counter = 0

				# Either end or continue from next state
				if end:
					if episode_i % test_freq == 0:
						# NOTE(review): divides by zero if the episode ends at
						# t == 0 — confirm this cannot happen in practice.
						loss_list.append(l_total/t)
					break
				s = s_prime
				#----------------------------------------------------------------------------

			# --------- Evaluate performance over test_steps episodes (using greedy policy)
			if episode_i % test_freq == 0:
				for i in range(test_steps):
					s = env.reset()
					for t in range(300):
						q_now = sess.run(q, feed_dict={state: s.reshape(1, 4)})
						a = np.argmax(q_now)
						s_prime, _, done, info = env.step(a)
						if done:
							episode_length = t + 1
							episode_return = -1 * gamma ** t
							length_list.append(episode_length)
							return_list.append(episode_return)
							break
						s = s_prime

				mean_length = np.mean(np.array(length_list))
				mean_return = np.mean(np.array(return_list))
				std_length = np.std(np.array(length_list))
				std_return = np.std(np.array(return_list))
				mean_length_list.append(mean_length)
				mean_return_list.append(mean_return)
				# save_path = saver.save(sess, '../../models/part1/sarsa/tf_model')
			# --------------------------------------------------------------------------

			# NOTE(review): assumes print_iters is a multiple of test_freq so the
			# stats below were computed this episode — otherwise NameError.
			if episode_i % print_iters == 0:
				print_tuple = (test_steps, mean_length, mean_return, std_length, std_return)
				print("#---After %d test episodes: Mean length = %f, mean return = %f, sd length = %f, sd return = %f" % print_tuple)
			episodes_counter += 1

		# Redundant: the enclosing `with` block closes the session on exit.
		sess.close()

	# Save final results to CSV file
	if save_data:
		output_data = OrderedDict()
		output_data['episode'] = range(0, n_episodes, test_freq)
		output_data['length'] = mean_length_list
		output_data['return'] = mean_return_list
		output_data['loss'] = loss_list
		df = pd.DataFrame.from_dict(output_data)
		df.to_csv('results/sarsa_nn_results.csv', index=False)
Exemplo n.º 3
0
# -*- encoding: UTF-8 -*-
import tensorflow as tf

# 1. Define the computation graph. If no graph is defined explicitly, the
#    default graph tf.get_default_graph() is used.
# 2. Define tensors in the graph; each tensor refers to a computation result.
a = tf.constant([1.0, 2.0], name='a')
b = tf.constant([3.0, 4.0], name='b')
ab = a + b
# 3. Run the graph inside a session (the default graph runs by default).
with tf.Session() as sess:
    result = sess.run(ab)
# 4. Print the result. `print result` in the original is Python-2-only
#    statement syntax (a SyntaxError on Python 3); print() works on both.
print(result)
Exemplo n.º 4
0
def usage():
    """Minimal example of wiring the LDR2HDR network up for a prediction pass.

    Builds a session, instantiates the generator, feeds it a fixed-shape
    placeholder batch, and requests the (image, sun position, fc) outputs.
    """
    session = tf.Session()
    net = LDR2HDR_Net(fc_dim=64, im_height=64, deconv_method='upsample')
    input_batch = tf.placeholder(tf.float32, [32, 64, 128, 3], name='InputImage')
    outImg, sunPos, fc = net.pred(inputs=input_batch, isTraining=True)
Exemplo n.º 5
0
            if not(isLearned) and self.thread_type is 'test':    # test threadを止めておく
                time.sleep(1.0)

            if isLearned and self.thread_type is 'learning':     # learning threadを止めておく
                time.sleep(3.0)

            if isLearned and self.thread_type is 'test':     # test threadが走る
                time.sleep(3.0)
                self.environment.run()


# -- main: the program's main section starts here ------------------------------
# M0. Define the global variables and start the TensorFlow session.
frames = 0              # total step count shared across all threads
isLearned = False       # flag indicating that learning has finished
SESS = tf.Session()     # start the TensorFlow session

# M1. Create the worker threads.
with tf.device("/cpu:0"):
    brain = Brain()     # the deep-neural-network class (shared brain)
    threads = []     # threads that will run in parallel
    # Prepare the learning threads.
    for i in range(N_WORKERS):
        thread_name = "local_thread"+str(i+1)
        threads.append(Worker_thread(thread_name=thread_name, thread_type="learning", brain=brain))

    # Prepare the thread that runs the test phase after learning finishes.
    threads.append(Worker_thread(thread_name="test_thread", thread_type="test", brain=brain))

# M2. Run the threads with TensorFlow's multithreading support.
COORD = tf.train.Coordinator()                  # coordinator needed for multithreading in TensorFlow
Exemplo n.º 6
0
    def __init__(self):
        """Build the complete YOLOv3 training graph and session.

        Reads all hyper-parameters from the global ``cfg``, creates the input
        placeholders, the model and its three losses, a warmup + cosine-decay
        learning-rate schedule, two training ops (backbone-frozen and
        all-variables), loader/saver, and the TensorBoard summary writer.
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # NOTE(review): "FISRT" mirrors the misspelled attribute name in the
        # config module — do not "fix" it here without changing the config.
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        # Timestamp used to tag this training run.
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        # One "period" = one pass over the training set, in steps.
        self.steps_per_period = len(self.trainset)
        config = tf.ConfigProto()
        config.allow_soft_placement = True
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)

        # Placeholders are deliberately shape-less so any batch size /
        # resolution can be fed.
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # global_step is a float Variable so it can be divided by
            # warmup_steps in the schedule below.
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            # Linear warmup, then cosine decay from learn_rate_init down to
            # learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        # First stage: only the three detection heads are trained.
        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            # Chain batch-norm updates -> optimizer + step bump -> EMA apply.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        # Second stage: every trainable variable is updated.
        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # Recreate the log directory from scratch on every run.
            # NOTE(review): os.mkdir fails if "./data" does not exist — confirm
            # it is created elsewhere.
            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
Exemplo n.º 7
0
import pyexcel


# Attendance roster: [name, status] pairs, everyone initialised to "absent".
NamesArray = [["Amani", "absent"],["Anas", "absent"],["Aya", "absent"],["Basant", "absent"],["Emy", "absent"],["Farah", "absent"],["Karim", "absent"],["Lamis", "absent"],["Lina", "absent"],["Mahmoud", "absent"],["Nour", "absent"],["Nouran", "absent"],["Omar", "absent"],["OmarH", "absent"],["Omneia", "absent"],["Rana", "absent"],["Reem", "absent"],["Salma", "absent"],["Samar", "absent"],["Samy", "absent"],["Shrouk", "absent"],["Tarek", "absent"]]

#input_video="CCEK.MOV"
# Windows paths below are raw strings: the original literals depended on
# "\P", "\A", "\L" and "\s" not being recognised escape sequences (kept
# literally), which raises a DeprecationWarning on Python 3.6+ and is slated
# to become an error. The resulting string values are byte-identical.
input_video = r"C:\ProgramData\Anaconda3\Lib\site-packages/CCECTest5.mp4"

modeldir = r'C:\ProgramData\Anaconda3\Lib\site-packages/model/20170511-185253.pb'
classifier_filename = r'C:\ProgramData\Anaconda3\Lib\site-packages/class/classifier.pkl'
npy = r'C:\ProgramData\Anaconda3\Lib\site-packages'
train_img = r"C:\ProgramData\Anaconda3\Lib\site-packages/ImagesCCEC2"

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)

        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        #factor = 0.709  # scale factor
        #factor = 0.36  # scale factor
        factor =0.41   #CCEL
        
        margin = 44
        frame_interval = 3
        batch_size = 1000
        image_size = 182
        input_image_size = 160
        
Exemplo n.º 8
0
def generic_graph(opts, data, trainFlag):
    """Build a training or validation graph targeting one IPU device.

    Constructs the dataset infeed, a compiled repeat-loop that accumulates
    loss and RMSE over ``batches_per_step`` batches, summaries, a saver and
    (when this process owns the device) the IPU system configuration.
    Returns a ``GraphOps`` bundle whose session has not been initialised yet.
    """
    graph = tf.Graph()
    training = trainFlag == util.Modes.TRAIN
    mode_name = 'training' if training else 'validation'
    batches_per_step = opts.batches_per_step if training else opts.validation_batches_per_step
    # When replicating, we divide the data stream into N streams, so we only need to do 1/N batches in each stream.
    # For this reason, batches_per_step must be a minimum of N.
    # NOTE(review): assumes batches_per_step is a multiple of
    # replication_factor — otherwise batches are silently dropped.
    batches_per_step = int(batches_per_step / opts.replication_factor)

    with graph.as_default():
        dataset, placeholders = data.get_dataset(opts, mode=trainFlag)
        kwargs = {} if opts.replication_factor == 1 else {
            'replication_factor': opts.replication_factor
        }
        infeed = ipu_infeed_queue.IPUInfeedQueue(
            dataset, f"{mode_name}_dataset_infeed", **kwargs)

        with ipu_scope(f'/device:IPU:0'):

            def comp_fn():
                # Loop body: last column of `batch` is the regression target,
                # the rest are features. In training mode the gradient op is
                # attached as a control dependency so it runs each iteration.
                def body(total_loss, total_rmse, batch):
                    loss, rmse, grad_op = graph_builder(
                        opts,
                        observed=batch[:, :-1],
                        ground_truth=tf.expand_dims(batch[:, -1], axis=1),
                        learning_rate=placeholders['learning_rate']
                        if training else None,
                        mode=trainFlag)
                    if not training:
                        return total_loss + loss, total_rmse + rmse
                    with tf.control_dependencies([grad_op]):
                        return total_loss + loss, total_rmse + rmse

                # Accumulators start at 0 in the model's configured dtype.
                return loops.repeat(
                    batches_per_step, body,
                    [tf.constant(0, getattr(np, opts.dtypes[0]))] * 2, infeed)

            outputs = ipu_compiler.compile(comp_fn, [])

        # Average them over batches per step
        avg_loss, avg_rmse = [x / batches_per_step for x in outputs]

        # Add relevant things to the tf.summary for both
        if training:
            tf.summary.scalar("loss", avg_loss)
            tf.summary.scalar("learning_rate", placeholders["learning_rate"])
        tf.summary.scalar(f"RMSPE/{mode_name}", avg_rmse)
        summary = tf.summary.merge_all()
        saver = tf.train.Saver()

        ipu_utils.move_variable_initialization_to_cpu()
        init = tf.global_variables_initializer()

        # Optional compile-report trace; stays None unless requested.
        report = None
        if opts.compiler_report:
            if training:
                summary_ops.ipu_compile_summary('compile_summary', avg_loss)
            with tf.device('cpu'):
                print('Initializing training report...')
                report = gen_ipu_ops.ipu_event_trace()

    writer = tf.summary.FileWriter(opts.logs_path + f'/{mode_name}',
                                   graph=graph,
                                   flush_secs=30)

    # Attach to IPUs and configure system
    # Subprocesses must set up IPU systems in their own scopes, then use their devices as IPU:0
    if (not training and opts.multiprocessing) or training:
        config = ipu_utils.create_ipu_config(
            profiling=training,
            use_poplar_text_report=True,
            max_cross_replica_sum_buffer_size=10000000,
            max_inter_ipu_copies_buffer_size=10000000)
        if opts.select_ipus == 'AUTO':
            config = ipu_utils.auto_select_ipus(config,
                                                [opts.replication_factor])
        else:
            config = ipu_utils.select_ipus(config,
                                           [opts.select_ipus[not training]])
        config = ipu_utils.set_compilation_options(
            config, {"prng.enable": str(opts.prng).lower()})
        ipu_utils.configure_ipu_system(config)

    # Training fetches the loss; validation fetches the RMSE.
    graph_outputs = ([avg_loss] if training else [avg_rmse]) + [summary]
    sess = tf.Session(graph=graph)
    return GraphOps(graph, sess, init, graph_outputs,
                    placeholders if training else None, infeed, saver, writer,
                    report, trainFlag)
Exemplo n.º 9
0
def keypoints(choice):
    """Run PoseNet on a video stream and classify the person's action.

    Each frame's 17 keypoint (x, y) coordinates are appended to a rolling
    DataFrame; once at least 120 frames exist, every 30th frame the most
    recent 120 rows are normalised with precomputed mean/std and fed to a
    pre-trained RNN that labels the action (Falling / Pushups / Sitting /
    Walking). Each annotated frame is pushed to a Streamlit placeholder.

    choice: 'Webcam' captures from device 0; 'upload video' reads 'test.mp4'.
    """
    frameST = st.empty()
    with tf.Session() as sess:                
        model_cfg, model_outputs = posenet.load_model(101, sess)
        output_stride = model_cfg['output_stride']
        # NOTE(review): pickle.loads of a bundled model file — only safe if
        # RNNmodel.pkl is trusted.
        model = pickle.load(open("RNNmodel.pkl","rb"))
        col =['nose_xCoord', 'nose_yCoord','leftEye_xCoord', 'leftEye_yCoord', 'rightEye_xCoord', 'rightEye_yCoord', 'leftEar_xCoord', 'leftEar_yCoord', 'rightEar_xCoord', 'rightEar_yCoord', 'leftShoulder_xCoord', 'leftShoulder_yCoord', 'rightShoulder_xCoord', 'rightShoulder_yCoord', 'leftElbow_xCoord', 'leftElbow_yCoord', 'rightElbow_xCoord', 'rightElbow_yCoord', 'leftWrist_xCoord', 'leftWrist_yCoord', 'rightWrist_xCoord', 'rightWrist_yCoord', 'leftHip_xCoord', 'leftHip_yCoord', 'rightHip_xCoord', 'rightHip_yCoord', 'leftKnee_xCoord', 'leftKnee_yCoord', 'rightKnee_xCoord', 'rightKnee_yCoord', 'leftAnkle_xCoord', 'leftAnkle_yCoord', 'rightAnkle_xCoord', 'rightAnkle_yCoord']
        dummy_frame = pd.DataFrame(columns = col)
        # Per-feature normalisation constants, presumably computed from the
        # RNN's training data — TODO confirm they match the model.
        Mean = np.array([148.66119491, 305.30049224, 142.6646119 , 310.25743596,
       141.70017197, 302.93952183, 141.27736617, 317.44867283,
       139.46369719, 297.02198297, 163.55299211, 318.43891733,
       161.08800759, 288.89443786, 201.92641401, 327.4968114 ,
       198.14737829, 268.80913136, 227.76026519, 321.95571328,
       222.86624951, 266.31670662, 231.37762059, 301.21041038,
       229.25918355, 278.40873512, 279.47660651, 299.97468079,
       275.04189452, 267.78987458, 332.20440178, 298.95615126,
       326.76802751, 265.76282502])
        Std = np.array([ 96.38779959, 127.16401286,  97.13470333, 129.87910693,
        95.74375047, 127.46249832,  93.7858246 , 129.05961629,
        90.41257012, 125.11638235,  87.46516144, 123.3528153 ,
        82.68437065, 115.98756097,  93.88162078, 126.86459209,
        88.92850539, 111.58051582, 106.44469339, 126.27066327,
       104.85191381, 111.8015252 ,  86.30759081, 119.30815504,
        84.94167523, 113.47366614,  91.67524166, 121.34462458,
        90.18384711, 115.57534719, 105.70870987, 131.18502572,
       104.6401585 , 126.9626246 ])
        # NOTE(review): `cap` is never assigned when choice is neither
        # 'Webcam' nor 'upload video' — NameError below; confirm callers
        # restrict the argument.
        if choice == 'Webcam':
            cap = cv2.VideoCapture(0)
            cap.set(3, 500)
            cap.set(4, 500)        
        elif choice == 'upload video':
            cap = cv2.VideoCapture('test.mp4')
        frame_count = 0
        result = 'Loading..'
        ### for writing text on frame
        font = cv2.FONT_HERSHEY_PLAIN 
        org = (0, 50)
        fontScale = 2
        color = (0, 0, 255) ### colour on BGR
        thickness = 2
        while True:
            input_image, display_image, output_scale = posenet.read_cap(
                cap, scale_factor=0.7125, output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs,
                feed_dict={'image:0': input_image}
            )
            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=1,   ## detectiong only single position
                min_pose_score=0.15)
            keypoint_coords *= output_scale
            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(
                display_image, pose_scores, keypoint_scores, keypoint_coords,
                min_pose_score=0.15, min_part_score=0.1)
            #cv2.imshow('posenet', overlay_image)
 
            # Unpack the 17 keypoints of the single detected pose
            # (keypoint_coords[0][i] = [x, y] for keypoint i).
            nose_xCoord = keypoint_coords[0][0][0]
            nose_yCoord = keypoint_coords[0][0][1]
            leftEye_xCoord = keypoint_coords[0][1][0]
            leftEye_yCoord = keypoint_coords[0][1][1]
            rightEye_xCoord = keypoint_coords[0][2][0]
            rightEye_yCoord = keypoint_coords[0][2][1]
            leftEar_xCoord = keypoint_coords[0][3][0]
            leftEar_yCoord = keypoint_coords[0][3][1]
            rightEar_xCoord = keypoint_coords[0][4][0]
            rightEar_yCoord = keypoint_coords[0][4][1]
            leftShoulder_xCoord = keypoint_coords[0][5][0]
            leftShoulder_yCoord = keypoint_coords[0][5][1]
            rightShoulder_xCoord = keypoint_coords[0][6][0]
            rightShoulder_yCoord = keypoint_coords[0][6][1]
            leftElbow_xCoord = keypoint_coords[0][7][0]
            leftElbow_yCoord = keypoint_coords[0][7][1]
            rightElbow_xCoord = keypoint_coords[0][8][0]
            rightElbow_yCoord = keypoint_coords[0][8][1]
            leftWrist_xCoord = keypoint_coords[0][9][0]
            leftWrist_yCoord = keypoint_coords[0][9][1]
            rightWrist_xCoord = keypoint_coords[0][10][0]
            rightWrist_yCoord = keypoint_coords[0][10][1]
            leftHip_xCoord = keypoint_coords[0][11][0]
            leftHip_yCoord = keypoint_coords[0][11][1]
            rightHip_xCoord = keypoint_coords[0][12][0]
            rightHip_yCoord = keypoint_coords[0][12][1]
            leftKnee_xCoord = keypoint_coords[0][13][0]
            leftKnee_yCoord = keypoint_coords[0][13][1]
            rightKnee_xCoord = keypoint_coords[0][14][0]
            rightKnee_yCoord = keypoint_coords[0][14][1]
            leftAnkle_xCoord = keypoint_coords[0][15][0]
            leftAnkle_yCoord = keypoint_coords[0][15][1]
            rightAnkle_xCoord = keypoint_coords[0][16][0]
            rightAnkle_yCoord = keypoint_coords[0][16][1]
            d ={'nose_xCoord': nose_xCoord, 'nose_yCoord': nose_yCoord, 'leftEye_xCoord': leftEye_xCoord, 'leftEye_yCoord': leftEye_yCoord, 'rightEye_xCoord': rightEye_xCoord, 'rightEye_yCoord': rightEye_yCoord, 'leftEar_xCoord': leftEar_xCoord, 'leftEar_yCoord': leftEar_yCoord, 'rightEar_xCoord': rightEar_xCoord, 'rightEar_yCoord': rightEar_yCoord, 'leftShoulder_xCoord': leftShoulder_xCoord, 'leftShoulder_yCoord': leftShoulder_yCoord, 'rightShoulder_xCoord': rightShoulder_xCoord, 'rightShoulder_yCoord': rightShoulder_yCoord, 'leftElbow_xCoord': leftElbow_xCoord, 'leftElbow_yCoord': leftElbow_yCoord, 'rightElbow_xCoord': rightElbow_xCoord, 'rightElbow_yCoord': rightElbow_yCoord, 'leftWrist_xCoord': leftWrist_xCoord, 'leftWrist_yCoord': leftWrist_yCoord, 'rightWrist_xCoord': rightWrist_xCoord, 'rightWrist_yCoord': rightWrist_yCoord, 'leftHip_xCoord': leftHip_xCoord, 'leftHip_yCoord': leftHip_yCoord, 'rightHip_xCoord': rightHip_xCoord, 'rightHip_yCoord': rightHip_yCoord, 'leftKnee_xCoord': leftKnee_xCoord, 'leftKnee_yCoord': leftKnee_yCoord, 'rightKnee_xCoord': rightKnee_xCoord, 'rightKnee_yCoord': rightKnee_yCoord, 'leftAnkle_xCoord': leftAnkle_xCoord, 'leftAnkle_yCoord': leftAnkle_yCoord, 'rightAnkle_xCoord': rightAnkle_xCoord, 'rightAnkle_yCoord': rightAnkle_yCoord}
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0 —
            # would need pd.concat if pandas is upgraded.
            dummy_frame = dummy_frame.append(pd.DataFrame(data = d , index = [frame_count]))
            #dummy_frame = pd.DataFrame(data = d, columns = col, index = [frame_count])
            #print(dummy_frame.shape)
            # Classify once every 30 frames, after a 120-frame warm-up.
            if (((frame_count+1) % 30 == 0) and (frame_count+1 >= 120)):
                #sc=StandardScaler() 
                #X = np.zeros((120,34))
                X = (dummy_frame.values)[(frame_count-119):(frame_count+1)]
                X = (X-Mean)/Std
                #print(X)
                #print(X.shape)
                X = np.asarray(X).reshape(-1,120,34)
                #print(X)
                #print(X.shape)
                #X = tf.transpose(X, [1,0,2])
                #X = tf.reshape(X,[-1, 34])
                #print(X.shape)
                result= model.predict_classes(X) ## predicting from the model
                prob = model.predict_proba(X)
                # `result` is re-bound from class index to display label here.
                if (result == 0):
                    result = 'Falling'
                elif (result == 1):
                    result = 'Pushups'
                elif (result == 2):
                    result = 'Sitting'
                elif (result == 3):
                    result = 'Walking'
                else:
                    result = 'Error'
                #st.write(prob)
            cv2.putText(overlay_image, 'Action: '+result, org, font, fontScale, color, thickness, cv2.LINE_AA, False) 
            frameST.image(overlay_image, channels="BGR")
            frame_count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                frameST = st.empty()
                cap.release()
                cv2.destroyAllWindows()
                break
            if choice == 'upload video':
                if frame_count == int(cap.get(cv2.CAP_PROP_FRAME_COUNT)):# total frames considered are (video_length_sec)*(frames/sec)
                    frameST = st.empty()
                    cap.release()
                    cv2.destroyAllWindows()
                    break
Exemplo n.º 10
0
from keras.layers import Conv2D, Softmax, MaxPooling2D, Dropout
from keras.layers import Flatten, Dense, Activation, BatchNormalization, InputLayer, GaussianNoise
from keras.models import Sequential, load_model, Model
from keras.optimizers import Adam, SGD, RMSprop
from keras.callbacks import ModelCheckpoint
from keras import metrics
from keras import regularizers
from data_gen import DataGenerator
import cv2
import matplotlib.pyplot as plt
import numpy as np
import functools
from cat_parse import get_vector_from_cats
# NOTE(review): `tf`, `pathlib`, and `random` are used here but imported
# further down / elsewhere — this only works if earlier notebook cells
# already imported them; confirm.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)


data_root = '/home/ad0915/Desktop/CVFinalDataset/best-artworks-of-all-time/images'
data_root = pathlib.Path(data_root)


# Collect every image path under the per-artist folders and shuffle them.
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)

# Map artist folder names to integer class indices.
labels = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
labels_to_idx = dict((name, idx) for idx, name in enumerate(labels))

# Label for each image is derived from its parent directory name.
all_image_labels = [labels_to_idx[pathlib.Path(path).parent.name] for path in all_image_paths]
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Concatenate
from keras.optimizers import Adam

# In[2]:

# backend
import tensorflow as tf
from keras import backend as k

# Don't pre-allocate memory; allocate as-needed
# NOTE(review): this re-binds `config`, shadowing the session config above.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Create a session with the above options specified.
k.tensorflow_backend.set_session(tf.Session(config=config))

# In[3]:

# Hyperparameters
batch_size = 128
num_classes = 10
epochs = 1000

# In[4]:
def add_module(input):
    #print(input.shape)

    Conv2D_reduce = Conv2D(16, (1, 1),
Exemplo n.º 12
0
    )
    args = parser.parse_args()

    print('loading onnx model')
    onnx_model = onnx.load(args.input)
    export_path = args.output

    onnx.checker.check_model(onnx_model)

    print('prepare tf model')
    tf_rep = prepare(onnx_model)

    if path.exists(export_path):
        shutil.rmtree(export_path)

    with tf.Session() as persisted_sess:
        print("load graph")
        persisted_sess.graph.as_default()
        tf.import_graph_def(tf_rep.graph.as_graph_def(), name='')

        i_tensors = []
        o_tensors = []
        inputs = {}
        outputs = {}

        for i in tf_rep.inputs:
            t = persisted_sess.graph.get_tensor_by_name(
                tf_rep.tensor_dict[i].name)
            i_tensors.append(t)
            tensor_info = tf.saved_model.utils.build_tensor_info(t)
            inputs[t.name.split(':')[0].lower()] = tensor_info
Exemplo n.º 13
0
def main(unused_argv):
    """Train a SimModel: build the graph, restore any existing checkpoint,
    then run the step loop with periodic console logging, train/dev
    summaries, and checkpointing.

    Relies on module-level FLAGS (train_dir, checkpoint_dir, number_of_steps,
    debug), the project's `configuration`, `sim_model` and `reader` modules,
    and TF 1.x APIs (tf.gfile, tf.contrib, tf.Session).
    """
    assert FLAGS.train_dir, "--train_dir is required"

    # Fixed seed for reproducible data order / init randomness on the numpy side.
    np.random.seed(1)

    model_config = configuration.ModelConfig()
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_dir = FLAGS.train_dir
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    # Build The tf Graph
    g = tf.Graph()
    with g.as_default():
        # Build the model
        with tf.variable_scope("train"):
            model = sim_model.SimModel(model_config, mode="train")
            model.build()
        #TODO(error ? placeholder)
        # with tf.variable_scope("train",reuse=True):
        #   eval_model = sim_model.SimModel(model_config,mode="eval")
        #   eval_model.build()

        # Set up the learning rate.
        # Constant LR unless a decay factor is configured, in which case an
        # exponential stair-case schedule is applied per `num_epochs_per_decay`.
        learning_rate_decay_fn = None
        learning_rate = tf.constant(training_config.initial_learning_rate)
        if training_config.learning_rate_decay_factor > 0:
            num_batches_per_epoch = (training_config.num_examples_per_epoch /
                                     model_config.batch_size)
            decay_steps = int(num_batches_per_epoch *
                              training_config.num_epochs_per_decay)

            def _learning_rate_decay_fn(learning_rate, global_step):
                # Stair-cased exponential decay, evaluated lazily by optimize_loss.
                return tf.train.exponential_decay(
                    learning_rate,
                    global_step,
                    decay_steps=decay_steps,
                    decay_rate=training_config.learning_rate_decay_factor,
                    staircase=True)

            learning_rate_decay_fn = _learning_rate_decay_fn

        # NOTE(review): epsilon=1.0 is far larger than Adam's usual 1e-8;
        # presumably intentional for stability — confirm before changing.
        optimizer = tf.train.AdamOptimizer(learning_rate,
                                           beta1=0.9,
                                           beta2=0.999,
                                           epsilon=1.0)
        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)
        #################
        # Summary
        #################
        # Histogram every trainable variable for TensorBoard inspection.
        for var in tf.trainable_variables():
            tf.summary.histogram("params/" + var.op.name, var)
        # Set up the Saver
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)
        # restore = tf.train.Saver()
    # Run training.
    with g.as_default():
        global_step = model.global_step

        init = tf.global_variables_initializer()

        # Train and dev share the same merged summary op.
        dev_summary_op = tf.summary.merge_all()
        train_summary_op = dev_summary_op
        ##################
        # session config
        ##################
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)
        sess = tf.Session(config=tf.ConfigProto(
            device_count={"CPU": 4},  # limit to num_cpu_core CPU usage  
            intra_op_parallelism_threads=2,
            inter_op_parallelism_threads=2,
            gpu_options=gpu_options,
            allow_soft_placement=True))
        sess.run(init)
        ###################
        # debug
        # https://www.tensorflow.org/programmers_guide/debugger
        # `run -f has_inf_or_nan`
        ###################
        if FLAGS.debug == True:
            sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
        ###################
        # Restore checkpoint
        ####################
        # Variables were already initialized above; a found checkpoint simply
        # overwrites them with the saved weights.
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Successfully loaded model from %s' %
                  ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)

        summary_writer = tf.summary.FileWriter(os.path.join(
            FLAGS.train_dir, "summaries", "train"),
                                               graph=sess.graph)
        dev_summary_writer = tf.summary.FileWriter(os.path.join(
            FLAGS.train_dir, "summaries", "dev"),
                                                   graph=sess.graph)
        #TODO should read data
        test_reader = reader.Test_batch()
        step = 0
        for feats in reader.batch_inputs():
            step = step + 1
            if step > FLAGS.number_of_steps: break

            start_time = time.time()
            # feats layout: (input_seqs, input_mask, labels) — the feat slot
            # is commented out below, so indices 0/1/2 are used directly.
            feed_dict = {
                model.input_seqs: feats[0],
                model.input_mask: feats[1],
                # model.feat:feats[2],
                model.labels: feats[2],
            }

            loss_value, acc_value = sess.run([train_op, model.acc], feed_dict)
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            # Console log every 50 steps.
            if step % 50 == 0:
                # examples_per_sec = model_config.batch_size / float(duration)
                format_str = ('%s: step %d, loss = %.2f ,acc = %.2f')
                print(format_str %
                      (datetime.now(), step, np.mean(loss_value), acc_value))

            # Train summaries every 200 steps.
            if step % 200 == 0:
                summary_str = sess.run(train_summary_op, feed_dict)
                summary_writer.add_summary(summary_str, step)

            # Dev-set summaries every 400 steps.
            if step % 400 == 0:
                dev_data = test_reader.next()
                feed_dict = {
                    model.input_seqs: dev_data[0],
                    model.input_mask: dev_data[1],
                    # model.feat:dev_data[2],
                    model.labels: dev_data[2],
                }

                dev_summary_str = sess.run(dev_summary_op, feed_dict)
                dev_summary_writer.add_summary(dev_summary_str, step)

            # Checkpoint every 5000 steps and on the final step.
            if step % 5000 == 0 or (step + 1) == FLAGS.number_of_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Exemplo n.º 14
0
def main():
    """Train / validate / test the SE-Net model from command-line options.

    Builds the model fresh or restores it from the latest checkpoint when
    ``--continue-training`` is set, runs the per-epoch train/validate/test
    loops with TensorBoard summaries, keeps regular checkpoints plus a
    separate "highest test accuracy" checkpoint, and finally exports a
    SavedModel (pb) bundle.

    Returns:
        1 on configuration errors (no checkpoint found, cannot create the
        project directory, training data unavailable); None otherwise.
    """
    # Parse the command line input
    # more about parser see: https://www.cnblogs.com/zknublx/p/6106343.html
    parser = argparse.ArgumentParser(description='Train SE-Net')

    parser.add_argument('--name',
                        default='/share/donghao/demo6/' + exp +
                        '/trained_models/' + path_ease + '/senet',
                        help='project name')
    # todo do not change!
    parser.add_argument('--v1',
                        type=str2bool,
                        default=23,
                        help='v1=18: res-18 | v1=34: res-34')
    # todo do not change!
    parser.add_argument('--attention-module',
                        type=str,
                        default='se_block',
                        help='input se_block or sese_block or others')
    # todo do not change! no matters
    parser.add_argument(
        '--softmax-flag',
        type=str2bool,
        default='0',
        help='only for sese_block | 0=single channel 1=all channel')
    # todo do not change!
    parser.add_argument('--switch',
                        type=int,
                        default=0,
                        help='0=standard | 1=pre | 2=identity')

    parser.add_argument('--train-or-not',
                        type=str2bool,
                        default='1',
                        help='train flag')

    parser.add_argument('--validate-or-not',
                        type=str2bool,
                        default='0',
                        help='validate flag')

    # BUGFIX: the flag name would map to args.test_models_or_not, but the
    # rest of this function reads args.test_or_not (AttributeError before);
    # dest= keeps the CLI flag unchanged while fixing the attribute name.
    parser.add_argument('--test_models-or-not',
                        dest='test_or_not',
                        type=str2bool,
                        default='1',
                        help='test_models flag')

    parser.add_argument('--data-dir',
                        default='/share/donghao/demo5/' + exp + '/np_dataset',
                        help='dataset info directory')
    # todo
    parser.add_argument('--epochs',
                        type=int,
                        default=epoch,
                        help='number of training epochs')  # todo fine-tune
    # todo
    parser.add_argument('--batch-size',
                        type=int,
                        default=batch,
                        help='batch size')  # todo fixed set it as 2^n
    # todo
    parser.add_argument('--tensorboard-dir',
                        default='/share/donghao/demo6/' + exp + '/logs/' +
                        path_ease + '/senet_tb',
                        help='name of the tensorboard data directory')
    # todo
    parser.add_argument('--pb-model-save-path',
                        default='/share/donghao/demo6/' + exp +
                        '/trained_models/' + path_ease + '/senet_pb',
                        help='pb model dir')
    # todo for t0-exp-senet tag_string='channel01_1d_resnet_pb_model'
    parser.add_argument('--tag-string',
                        default='senet_pb',
                        help='tag string for model')

    parser.add_argument('--checkpoint-interval',
                        type=int,
                        default=1,
                        help='checkpoint interval')

    parser.add_argument('--max-to-keep',
                        type=int,
                        default=2,
                        help='num of checkpoint files max to keep')

    parser.add_argument('--weight-decay',
                        type=float,
                        default=0.0001,
                        help='weight decay fro bn')

    parser.add_argument('--lr-decay-method-switch',
                        type=int,
                        default=1,
                        help='0=piecewise|others=exponential')

    parser.add_argument('--lr-values',
                        type=str,
                        default='0.001;0.0005;0.0001;0.00001',
                        help='learning rate values')  # todo piecewise

    parser.add_argument('--lr-boundaries',
                        type=str,
                        default='353481;706962;2120886',
                        help='learning rate change boundaries (in batches)')

    parser.add_argument('--lr-value',
                        type=float,
                        default=0.0001,
                        help='learning rate for exp decay')  # todo exp decay

    parser.add_argument('--decay-steps',
                        type=float,
                        default=num_iteration,
                        help='decay_steps=1 epoch')

    parser.add_argument('--decay-rate',
                        type=float,
                        default=0.99,
                        help='decay rate: for 100 epoch -> lr=0.0001')

    parser.add_argument('--moving-average-decay',
                        type=float,
                        default=0.9999,
                        help='moving avg decay')

    parser.add_argument('--momentum',
                        type=float,
                        default=0,
                        help='momentum for the optimizer')  # todo 0.9

    parser.add_argument('--adam', type=float, default=1,
                        help='adam optimizer')  # todo control-variable comparison against the single-channel experiment

    parser.add_argument('--adagrad',
                        type=float,
                        default=0,
                        help='adagrad optimizer')

    parser.add_argument('--rmsprop',
                        type=float,
                        default=0,
                        help='rmsprop optimizer')

    # BUGFIX: same dest mismatch as --test_models-or-not; the printout below
    # reads args.num_samples_for_test_summary.
    parser.add_argument('--num-samples-for-test_models-summary',
                        dest='num_samples_for_test_summary',
                        type=int,
                        default=1000,
                        help='range in 0-10044')

    parser.add_argument('--confusion-matrix-normalization',
                        type=str2bool,
                        default='1',
                        help='confusion matrix norm flag')

    # NOTE(review): type=list applied to a command-line string would split it
    # into characters; only the default value behaves as intended — confirm.
    parser.add_argument('--class-names',
                        type=list,
                        default=[
                            np.str_('N'),
                            np.str_('S'),
                            np.str_('V'),
                            np.str_('F'),
                            np.str_('Q')
                        ],
                        help='...')
    # todo
    parser.add_argument('--continue-training',
                        type=str2bool,
                        default=continue_training,
                        help='continue training flag')

    args = parser.parse_args()

    print('[i] Project name:                 ', args.name)
    print('[i] Model categories(1=v1|0=v2):  ', args.v1)
    print('[i] Attention module categories): ', args.attention_module)
    print('[i] Softmax flag(for sese):       ', args.softmax_flag)
    print('[i] Attention switch:             ', args.switch)
    print('[i] Train or not:                 ', args.train_or_not)
    print('[i] Validate or not:              ', args.validate_or_not)
    print('[i] Test or not:                  ', args.test_or_not)
    print('[i] Data directory:               ', args.data_dir)
    print('[i] epochs:                       ', args.epochs)
    print('[i] Batch size:                   ', args.batch_size)
    print('[i] Tensorboard directory:        ', args.tensorboard_dir)
    print('[i] Pb model save path:           ', args.pb_model_save_path)
    print('[i] Tag string:                   ', args.tag_string)
    print('[i] Checkpoint interval:          ', args.checkpoint_interval)
    print('[i] Checkpoint max2keep:          ', args.max_to_keep)
    print('[i] Weight decay(bn):             ', args.weight_decay)
    print('[i] Learning rate decay switch    ', args.lr_decay_method_switch)
    print('[i] Learning rate values:         ', args.lr_values)
    print('[i] Learning rate boundaries:     ', args.lr_boundaries)
    print('[i] Learning rate value(exp):     ', args.lr_value)
    print('[i] Decay steps:                  ', args.decay_steps)
    print('[i] Decay rate:                   ', args.decay_rate)
    print('[i] Moving average decay:         ', args.moving_average_decay)
    print('[i] Momentum:                     ', args.momentum)
    print('[i] Adam:                         ', args.adam)
    print('[i] Adagrad:                      ', args.adagrad)
    print('[i] Rmsprop:                      ', args.rmsprop)
    print('[i] Num of samples for test_models       ',
          args.num_samples_for_test_summary)
    print('[i] Confusion matrix norm:        ',
          args.confusion_matrix_normalization)
    print('[i] Class names:                  ', args.class_names)
    print('[i] Continue training:            ', args.continue_training)

    # Find an existing checkpoint & continue training...
    start_epoch = 0
    if args.continue_training:
        state = tf.train.get_checkpoint_state(checkpoint_dir=args.name,
                                              latest_filename=None)
        if state is None:
            print('[!] No network state found in ' + args.name)
            return 1
        # check ckpt path
        ckpt_paths = state.all_model_checkpoint_paths
        if not ckpt_paths:
            print('[!] No network state found in ' + args.name)
            return 1

        # find the latest checkpoint file to go on train-process...
        last_epoch = None
        checkpoint_file = None
        for ckpt in ckpt_paths:
            # os.path.basename return the final component of a path
            # for e66.ckpt.data-00000-of-00001 we got ckpt_num=66
            ckpt_num = os.path.basename(ckpt).split('.')[0][1:]
            try:
                ckpt_num = int(ckpt_num)
            except ValueError:
                # skip checkpoint files that don't follow the eNN.ckpt pattern
                continue
            if last_epoch is None or last_epoch < ckpt_num:
                last_epoch = ckpt_num
                checkpoint_file = ckpt

        if checkpoint_file is None:
            print('[!] No checkpoints found, cannot continue!')
            return 1

        metagraph_file = checkpoint_file + '.meta'

        if not os.path.exists(metagraph_file):
            print('[!] Cannot find metagraph', metagraph_file)
            return 1
        start_epoch = last_epoch
    else:
        metagraph_file = None
        checkpoint_file = None
        try:
            print('[i] Creating directory             {}...'.format(args.name))
            os.makedirs(args.name)
        except IOError as e:
            print('[!]', str(e))
            return 1

    print('[i] Configuring the training data...')
    try:
        td = TrainingData(args.data_dir, args.batch_size)
        print('[i] training samples:             ', td.num_train)
        print('[i] validation samples:           ', td.num_valid)
        print('[i] classes:                      ', td.num_classes)
        print('[i] ecg_chip size:                ',
              f'({td.sample_width}, {td.sample_length})')
    except (AttributeError, RuntimeError) as e:
        print('[!] Unable to load training data:', str(e))
        return 1

    print('[i] Training ...')
    with tf.Session(config=config) as sess:
        if start_epoch != 0:
            # Resuming: rebuild graph + weights from the saved metagraph.
            print('[i] Building model from metagraph...')
            xs, ys = build_from_metagraph(sess, metagraph_file,
                                          checkpoint_file)
            loss, accuracy, y_predict, train_op = build_optimizer_from_metagraph(
                sess)
        else:
            print('[i] Building model for dual channel...')
            xs, ys, y_predict, loss, accuracy, train_op = \
                build_model_and_optimizer(args.softmax_flag, td.num_classes, td.sample_width,
                                          td.sample_length, td.sample_channel,
                                          args.moving_average_decay,
                                          args.lr_decay_method_switch, args.lr_values,
                                          args.lr_boundaries, args.lr_value, args.decay_steps,
                                          args.decay_rate, args.adam, args.momentum,
                                          args.adagrad, args.rmsprop, args.v1, attention_module=args.attention_module,
                                          switch=args.switch, weight_decay=args.weight_decay)

        # todo a typical wrong implement of initializer for a "reload" model
        # init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        # sess.run(init_op)
        # todo right way to do this: initialize w&b as the last update value...
        initialize_uninitialized_variables(sess)
        # create various helpers
        summary_writer = tf.summary.FileWriter(args.tensorboard_dir,
                                               sess.graph)
        # saver: regular checkpoints; saver1: best-accuracy checkpoints
        saver = tf.train.Saver(max_to_keep=args.max_to_keep)
        saver1 = tf.train.Saver(max_to_keep=args.max_to_keep)

        # build summaries (None when the corresponding phase is disabled)
        train_precision = PrecisionSummary(sess, summary_writer, 'train', td.num_train_iter, args.continue_training) \
            if args.train_or_not else None
        valid_precision = PrecisionSummary(sess, summary_writer, 'valid', td.num_valid_iter, args.continue_training) \
            if args.validate_or_not else None
        test_precision = PrecisionSummary(sess, summary_writer, 'test_models', td.num_test_iter, args.continue_training) \
            if args.test_or_not else None
        train_loss = LossSummary(sess, summary_writer, 'train', td.num_train_iter, args.continue_training) \
            if args.train_or_not else None
        valid_loss = LossSummary(sess, summary_writer, 'valid', td.num_valid_iter, args.continue_training) \
            if args.validate_or_not else None
        test_loss = LossSummary(sess, summary_writer, 'test_models', td.num_test_iter, args.continue_training) \
            if args.test_or_not else None
        # self, session, writer, process_flag, restore=False
        train_excitation = ExcitationSummary(sess, summary_writer, 'train', args.attention_module, args.continue_training, path_ease) \
            if args.train_or_not else None
        valid_excitation = ExcitationSummary(sess, summary_writer, 'valid', args.attention_module, args.continue_training, path_ease) \
            if args.validate_or_not else None
        test_excitation = ExcitationSummary(sess, summary_writer, 'test_models', args.attention_module, args.continue_training, path_ease) \
            if args.test_or_not else None
        # set saved_model builder; suffix with the resume epoch so a resumed
        # run does not clash with the original export directory
        if start_epoch != 0:
            builder = tf.saved_model.builder.SavedModelBuilder(
                args.pb_model_save_path + f'_{start_epoch}')
        else:
            builder = tf.saved_model.builder.SavedModelBuilder(
                args.pb_model_save_path)

        print('[i] Training...')
        max_acc = 0
        # if train the first time, start_epoch=0 else start_epoch=last_epoch(from checkpoint file...)
        for e in range(start_epoch, args.epochs):
            # Train ->
            # cache one sample per class (labels 0-4) for excitation summaries
            train_cache = []
            train_flag_lst = [0, 1, 2, 3, 4]
            if args.train_or_not:
                td.train_iter(process='train', num_epoch=args.epochs)
                description = '[i] Train {:>2}/{}'.format(
                    e + 1, args.epochs)  # epoch_No/total_epoch_No -> e+1/200
                for _ in tqdm(iterable=td.train_tqdm_iter,
                              total=td.num_train_iter,
                              desc=description,
                              unit='batches'):
                    x, y = sess.run(
                        td.train_sample)  # array(?,1,512,2) array(?,5)
                    train_dict = {xs: x, ys: y}
                    _, acc, los = sess.run([train_op, accuracy, loss],
                                           feed_dict=train_dict)
                    # sample
                    if train_flag_lst:  # if not find all yet
                        dense_y = np.argmax(y, axis=1)  # (?,)
                        for index, ele in enumerate(dense_y):
                            if ele in train_flag_lst:
                                train_cache.append(
                                    (x[index],
                                     ele))  # example(1,512,2) label()
                                train_flag_lst.remove(ele)
                                break
                    # add for precision & ce loss
                    train_precision.add(acc=acc)
                    train_loss.add(values=los)
            # Validate ->
            validate_cache = []
            validate_flag_lst = [0, 1, 2, 3, 4]
            if args.validate_or_not:
                td.valid_iter(process='validate', num_epoch=args.epochs)
                description = '[i] Valid {:>2}/{}'.format(e + 1, args.epochs)
                for _ in tqdm(iterable=td.valid_tqdm_iter,
                              total=td.num_valid_iter,
                              desc=description,
                              unit='batches'):
                    x, y = sess.run(
                        td.valid_sample)  # array(?,1,512,2) array(?,)
                    validate_dict = {xs: x, ys: y}
                    acc, los = sess.run([accuracy, loss],
                                        feed_dict=validate_dict)
                    # sample
                    if validate_flag_lst:  # if not find all yet
                        dense_y = np.argmax(y, axis=1)  # (?,)
                        for index, ele in enumerate(dense_y):
                            # BUGFIX: was checking train_flag_lst (copy-paste),
                            # which broke the one-sample-per-class collection
                            # for the validation excitation summary
                            if ele in validate_flag_lst:
                                validate_cache.append(
                                    (x[index],
                                     ele))  # example(1,512,2) label()
                                validate_flag_lst.remove(ele)
                                break
                    # add for precision & ce loss
                    valid_precision.add(acc=acc)
                    valid_loss.add(values=los)
            # Test ->
            test_cache = []
            test_flag_lst = [0, 1, 2, 3, 4]
            if args.test_or_not:
                td.test_iter(process='test_models', num_epoch=args.epochs)
                description = '[i] Test {:>2}/{}'.format(e + 1, args.epochs)
                for _ in tqdm(iterable=td.test_tqdm_iter,
                              total=td.num_test_iter,
                              desc=description,
                              unit='batches'):
                    x, y = sess.run(
                        td.test_sample)  # array(?,1,512,2) array(?,5)
                    test_dict = {xs: x, ys: y}
                    acc, los = sess.run([accuracy, loss], feed_dict=test_dict)
                    # sample
                    if test_flag_lst:  # if not find all yet
                        dense_y = np.argmax(y, axis=1)  # (?,)
                        for index, ele in enumerate(dense_y):
                            if ele in test_flag_lst:
                                test_cache.append(
                                    (x[index],
                                     ele))  # example(1,512,2) label()
                                test_flag_lst.remove(ele)
                                break
                    # add for precision & ce loss
                    test_precision.add(acc=acc)
                    test_loss.add(values=los)
            # check
            if not args.train_or_not and not args.validate_or_not and not args.test_or_not:
                exit('[!] No procedures implemented!')

            # todo check
            # sess.graph.finalize()
            # todo push & flush tb
            # self, train, valid, test_models, train_lst, valid_lst, test_lst, xs, epoch
            if args.train_or_not:
                train_excitation.push(train_cache, xs, e)
                train_precision.push(e)
                train_loss.push(e)
            if args.validate_or_not:
                valid_excitation.push(validate_cache, xs, e)
                valid_precision.push(e)
                valid_loss.push(e)
            if args.test_or_not:
                test_excitation.push(test_cache, xs, e)
                test_precision.push(e)
                test_loss.push(e)

            # flush all(summaries of loss/precision/ecg_chip & summaries of ecgnet) protocol buf into disk
            summary_writer.flush()

            # save checkpoint
            if (e + 1) % args.checkpoint_interval == 0:
                checkpoint = '{}/e{}.ckpt'.format(args.name, e + 1)
                saver.save(sess, checkpoint)
                print('[i] Checkpoint saved:', checkpoint)
            # BUGFIX: test_precision is None when the test pass is disabled;
            # guard to avoid an AttributeError on the best-accuracy tracking.
            if test_precision is not None:
                avg_acc = test_precision.precision_cache
                # todo keep a separate checkpoint of the highest-accuracy model ^-^
                if (e + 1) % args.checkpoint_interval == 0 and avg_acc >= max_acc:
                    checkpoint2 = '{}/highest/e{}.ckpt'.format(args.name, e + 1)
                    saver1.save(sess, checkpoint2)
                # refresh max_acc
                max_acc = avg_acc if avg_acc > max_acc else max_acc

        # close writer
        summary_writer.close()

        # after all epochs goes out, save pb model...
        print('[i] Saving pb model(after training steps goes up)...')
        builder.add_meta_graph_and_variables(sess, [args.tag_string])
        builder.save()
        print('[i] programme finished!')
Exemplo n.º 15
0
def get_session():
    """Return a TF session whose GPU memory grows on demand instead of
    being fully pre-allocated at startup."""
    options = tf.ConfigProto()
    options.gpu_options.allow_growth = True
    return tf.Session(config=options)
Exemplo n.º 16
0
def main():
    """Toy TF1 linear-model demo: fit y = w * x toward targets y_real = 10 + i
    by gradient descent, printing the weight delta per outer epoch.

    Uses module-level LEARNING_RATE; runs 100000 outer epochs of 3 inner
    steps each. TF 1.x only (placeholders, sessions,
    tf.initialize_all_variables — a deprecated alias of
    tf.global_variables_initializer).
    """
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    #task_index = FLAGS.task_index
    # Define variables
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, name="x")

    with tf.name_scope('weights'):
        # NOTE(review): target_w and w_list[1] are created but never trained
        # or read below; only w (= w_list[0]) participates in the model.
        target_w = tf.Variable(2.0, name='target_w')
        w_list = [tf.Variable(2.0, name='target_w') for i in range(2)]
        w = w_list[0]

    with tf.name_scope('output'):
        y = tf.multiply(x, w, name='y')

    with tf.name_scope('real_output'):
        y_ = tf.placeholder(tf.float32, name="y_")

    with tf.name_scope('gradient'):
        loss = tf.reduce_mean(tf.square(y_ - y))  # MSE loss (mean of squared errors)

    # specify optimizer
    with tf.name_scope('train'):
        # optimizer is an "operation" which we can execute in a session
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
            loss)
    # create a summary for network gradients
    init_op = tf.initialize_all_variables()

    with tf.Session() as sess:

        sess.run(init_op)
        epoch = 0
        start = datetime.datetime.now()
        for epoch in range(100000):
            # weight before this epoch, to report the per-epoch delta
            init_w = sess.run(w)
            start_time_epoch = datetime.datetime.now()
            for i in range(3):
                start_time = datetime.datetime.now()
                # print("task%d - epoch%d: " % (task_index, epoch), '  ')
                x_i = i
                y_real = 10 + i

                _, loss_i = sess.run([optimizer, loss],
                                     feed_dict={
                                         x: x_i,
                                         y_: y_real
                                     })

                #loss_i = sess.run(loss, feed_dict={x: x_i, y_: y_real})

                end_time = datetime.datetime.now()
                # print (
                # "start_time: " + str(start_time) + ", x_i: " + str(x)  + ", y_real: " + str(
                #     y_real) + ", loss_i: " + str(loss_i)  + ", end_time:" + str(end_time))

            # loss on the last (x_i, y_real) pair of this epoch
            loss2 = sess.run(loss, feed_dict={x: x_i, y_: y_real})
            end_time = str(datetime.datetime.now())
            new_w = sess.run(w)

            print(
                "start_time:%s    end_time:%s     gradient:%s   Final States of weight in epoch%d: "
                % (start_time_epoch, end_time, str(new_w - init_w), epoch),
                new_w)
            # NOTE(review): redundant — epoch is the for-loop variable and is
            # overwritten on the next iteration
            epoch += 1

    end = datetime.datetime.now()
    print(end - start)
    pass
def main(_):
    """Train the attention-based GRU relation-extraction model.

    Loads pre-computed embeddings and training arrays from ./data, builds the
    network.GRU graph, runs mini-batch training for settings.num_epochs, and
    periodically checkpoints to ./model/.
    """
    # the path to save models
    save_path = './model/'

    print('reading wordembedding')
    wordembedding = np.load('./data/vec.npy')   # character-embedding lookup table
    # print("wordembedding",wordembedding)
    # print('len(wordembedding)',len(wordembedding))

    print('reading training data')
    train_y = np.load('./data/train_y.npy')   # classification labels, i.e. relation vectors of the training set
    # print('train_y',train_y)
    train_word = np.load('./data/train_word.npy')  # per-sentence arrays of character ids
    # print('train_word', train_word)
    train_pos1 = np.load('./data/train_pos1.npy')   # position embeddings relative to the first entity
    # print('train_pos1', train_pos1)
    train_pos2 = np.load('./data/train_pos2.npy')  # position embeddings relative to the second entity
    # print('train_pos2', train_pos2)
    train_rel = np.load('./data/train_rel.npy')
    # print('train_rel',train_rel)

    settings = network.Settings()   # hyper-parameter container
    settings.vocab_size = len(wordembedding)     # 16117 for this data set
    settings.num_classes = len(train_y[0])       # 4 for this data set

    big_num = settings.big_num
    keep_prob = settings.keep_prob
    print('keep_prob', keep_prob)

    with tf.Graph().as_default():   # make a fresh graph the default for this TensorFlow runtime

        sess = tf.Session()  # session used to run all TensorFlow ops below
        with sess.as_default():   # make it the default session

            initializer = tf.contrib.layers.xavier_initializer()  # Xavier initialization for the weight matrices
            with tf.variable_scope("model", reuse=None, initializer=initializer):  # context manager for variable creation; reuse=None inherits the parent scope's reuse flag
                m = network.GRU(is_training=True, word_embeddings=wordembedding, settings=settings)    # build the GRU model
            global_step = tf.Variable(0, name="global_step", trainable=False)   # global step counter, not trainable
            optimizer = tf.train.AdamOptimizer(0.0005)    # Adam optimizer

            train_op = optimizer.minimize(m.final_loss, global_step=global_step) # op node that minimizes the final loss
            sess.run(tf.global_variables_initializer())  # initialize model parameters
            saver = tf.train.Saver(max_to_keep=None)   # checkpoint saver; max_to_keep=None keeps every checkpoint (default is 5)

            merged_summary = tf.summary.merge_all()   # merge all summaries so TensorBoard can display them
            summary_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/train_loss', sess.graph)  # writer that saves the graph and training summaries

            def train_step(word_batch, pos1_batch, pos2_batch, y_batch,rel_batch, big_num):  # one training step
                """Flatten one batch of bagged sentences and run a single optimizer step."""
                feed_dict = {}
                total_shape = []
                total_num = 0
                total_word = []
                total_pos1 = []
                total_pos2 = []
                total_rel = []
                # print(word_batch)  50 sentences
                for i in range(len(word_batch)):
                    # print(len(word_batch))  equals 1
                    total_shape.append(total_num)

                    total_num += len(word_batch[i])
                    for word in word_batch[i]:  # descend one nesting level
                        total_word.append(word)
                    for pos1 in pos1_batch[i]:
                        total_pos1.append(pos1)
                    for pos2 in pos2_batch[i]:
                        total_pos2.append(pos2)
                    for rel in rel_batch[i]:
                        total_rel.append(rel)
                total_shape.append(total_num)
                # print(total_num)
                # print(total_shape)
                # print(len(total_shape))
                # print(total_word)
                # print(len(total_word))
                # print(total_pos1)
                # print(len(total_pos1))
                # print(total_pos2)
                # print(len(total_pos2))
                # exit()
                # for i in range(len(total_rel)):
                #     # a = total_rel[i]
                #     print(type(total_rel))
                #     print(type(total_rel[0]))
                #     print(len(total_rel[i]))
                #     while len(total_rel[i]) !=70:
                #         total_rel[i].append(16116)
                total_shape = np.array(total_shape)   # [0,1,2,3...50]
                total_word = np.array(total_word)  # for each of the 50 sentences, the id of each of its 70 characters; length 50
                total_pos1 = np.array(total_pos1)  # for each of the 50 sentences, the distance of each of its 70 characters to the first entity; length 50
                total_pos2 = np.array(total_pos2)  # for each of the 50 sentences, the distance of each of its 70 characters to the second entity; length 50
                total_rel = np.array(total_rel)

                feed_dict[m.total_shape] = total_shape
                feed_dict[m.input_word] = total_word
                feed_dict[m.input_pos1] = total_pos1
                feed_dict[m.input_pos2] = total_pos2
                feed_dict[m.input_y] = y_batch   # relation vectors of the 50 sentences
                feed_dict[m.input_rel] = total_rel
                # print(total_shape)
                # print(len(total_shape))
                # print(total_word)
                # print(len(total_word[0]))
                # print(total_pos1)
                # print(len(total_pos1[0]))
                # print(y_batch)
                # print(len(y_batch))
                # print(total_rel)
                # print(len(total_rel[0]))
                # exit()
                temp, step, loss, accuracy, summary, l2_loss, final_loss = sess.run(
                    [train_op, global_step, m.total_loss, m.accuracy, merged_summary, m.l2_loss, m.final_loss],
                    feed_dict)

                time_str = datetime.datetime.now().isoformat()
                accuracy = np.reshape(np.array(accuracy), (big_num))
                acc = np.mean(accuracy)
                summary_writer.add_summary(summary, step)
                # print('step',step)
                if step % 50 == 0:
                #if step % 50 == 0:
                    tempstr = "{}: step {}, softmax_loss {:g}, acc {:g}".format(time_str, step, loss, acc)
                    print(tempstr)

            for one_epoch in range(settings.num_epochs):
                # print('one_epoch',one_epoch)
                temp_order = list(range(len(train_word)))  # train_word holds per-sentence char-id arrays; length = number of sentences, i.e. {0,1....866}
                # print('temp_order',temp_order)
                np.random.shuffle(temp_order)   # shuffle the training order
                # print('temp_order', temp_order)
                # print('len(temp_order)',len(temp_order))
                for i in range(int(len(temp_order) / float(settings.big_num))): # at most 50 entity pairs per batch; this is the number of batches per epoch
                    # print('i',i)
                    temp_word = []
                    temp_pos1 = []
                    temp_pos2 = []
                    temp_y = []
                    temp_rel = []

                    temp_input = temp_order[i * settings.big_num:(i + 1) * settings.big_num]
                    # print('temp_input',temp_input)
                    for k in temp_input:
                        temp_word.append(train_word[k])
                        temp_pos1.append(train_pos1[k])
                        temp_pos2.append(train_pos2[k])
                        temp_y.append(train_y[k])   # relation-vector array
                        temp_rel.append(train_rel[k])
                    num = 0
                    for single_word in temp_word:
                        # print(len(single_word[0]))  equals 70
                        num += len(single_word)

                    if num > 1500:
                        print('out of range')
                        continue

                    temp_word = np.array(temp_word)
                    temp_pos1 = np.array(temp_pos1)
                    temp_pos2 = np.array(temp_pos2)
                    temp_y = np.array(temp_y)
                    temp_rel = np.array(temp_rel)

                    train_step(temp_word, temp_pos1, temp_pos2, temp_y, temp_rel,settings.big_num)

                    current_step = tf.train.global_step(sess, global_step)  # global step counts optimizer updates globally, like a clock for scheduling actions
                    #if current_step > 8000 and current_step % 100 == 0:
                    # print('current_step',current_step)
                    # if current_step > 80 and current_step % 5== 0:
                    if current_step > 300 and current_step % 10 == 0:
                        # print('saving model')
                        path = saver.save(sess, save_path + 'ATT_GRU_model', global_step=current_step)
                        tempstr = 'have saved model to ' + path
                        print(tempstr)
Exemplo n.º 18
0
def train():
  """Train CIFAR-10 for a number of steps.

  Builds the distorted-input pipeline, inference graph, loss and train op,
  then runs the training loop: logs throughput every 10 steps, writes
  summaries every 100 steps, and checkpoints every 1000 steps (and at the
  final step).
  """
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    # Get images and labels for CIFAR-10.
    images, labels = cifar10.distorted_inputs()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate loss.
    loss = cifar10.loss(logits, labels)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = cifar10.train(loss, global_step)

    # Create a saver over all variables.
    # (tf.all_variables was deprecated in favor of tf.global_variables.)
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation based on the TF collection of Summaries.
    # (tf.merge_all_summaries was deprecated in favor of tf.summary.merge_all.)
    summary_op = tf.summary.merge_all()

    # Build an initialization operation to run below.
    # (tf.initialize_all_variables was deprecated in favor of
    # tf.global_variables_initializer.)
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    # Start the queue runners that feed the input pipeline.
    tf.train.start_queue_runners(sess=sess)

    # tf.train.SummaryWriter and its graph_def= argument were deprecated;
    # tf.summary.FileWriter takes the graph directly.
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    for step in range(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / float(duration)
        sec_per_batch = float(duration)

        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
Exemplo n.º 19
0
def train(modelParams, epochNumber):
    """Run inference/testing of a previously trained model (despite the name,
    no training op is executed here -- the train/loss graph is commented out).

    Restores checkpoint `epochNumber` from modelParams['trainLogDir'] + '_v',
    writes the graph as .pbtxt and a frozen .pb, then iterates maxSteps
    batches: saves predicted heat-map images to modelParams['outputDir'],
    prints per-sample classification matches, and dumps predictions via
    data_output.output.
    """
    # import corresponding model name as model_cnn, specifed at json file
    model_cnn = importlib.import_module('Model_Factory.' +
                                        modelParams['modelName'])

    if not os.path.exists(modelParams['dataDir']):
        raise ValueError("No such data directory %s" % modelParams['dataDir'])

    _setupLogging(os.path.join(modelParams['logDir'], "genlog"))

    with tf.Graph().as_default():
        # track the number of train calls (basically number of batches processed)
        globalStep = tf.get_variable('globalStep', [],
                                     initializer=tf.constant_initializer(0),
                                     trainable=False)
        # Get images inputs for model_cnn.
        # NOTE(review): in the 'v' branch input_data is never assigned, yet it
        # is used below -- this function appears to assume phase != 'v'; confirm.
        if modelParams['phase'] == 'v':
            filename, pngTemp, targetT = data_input.inputs_vali(**modelParams)
        else:
            input_data = data_input.inputs(**modelParams)
        print('Input        ready')
        #TEST###        filenametest, pngTemptest, targetTtest = data_input.inputs_test(**modelParams)

        # Build a Graph that computes the HAB predictions from the
        # inference model
        output_res = model_cnn.inference_l2reg(input_data['image'],
                                               **modelParams)
        # loss model
        #loss = model_cnn.loss_l2reg(output_res, input_data, **modelParams)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #opTrain = model_cnn.train(loss, globalStep, **modelParams)
        ##############################
        print('Testing     ready')
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        print('Saver        ready')

        # Build the summary operation based on the TF collection of Summaries.
        summaryOp = tf.summary.merge_all()
        print('MergeSummary ready')
        # Build an initialization operation to run below.
        #init = tf.initialize_all_variables()
        #        init = tf.global_variables_initializer()

        #opCheck = tf.add_check_numerics_ops()
        # Start running operations on the Graph.
        config = tf.ConfigProto(
            log_device_placement=modelParams['logDevicePlacement'])
        config.gpu_options.allow_growth = True
        config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        sess = tf.Session(config=config)
        print('Session      ready')

        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
        #        sess.run(init)

        # restore a saver.
        # NOTE(review): print() with a comma does not %-format -- the literal
        # '%d' is printed; probably meant logging-style formatting.
        print('Loading Ex-Model with epoch number %d ...', epochNumber)
        print('     ',
              modelParams['trainLogDir'] + '_v/model.ckpt-' + str(epochNumber))
        saver.restore(
            sess,
            (modelParams['trainLogDir'] + '_v/model.ckpt-' + str(epochNumber)))
        #saver.restore(sess, (modelParams['trainLogDir']+'_30k/model.ckpt-29000'))
        print('Ex-Model     loaded')

        if True:
            # if True: freeze graph
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 '.',
                                 modelParams['trainLogDir'] + '_v/model.pbtxt',
                                 as_text=True)
            # Output nodes: every node in the graph is listed as an output so
            # nothing is pruned during freezing.
            output_node_names = [
                n.name for n in tf.get_default_graph().as_graph_def().node
            ]
            # Freeze the graph
            frozen_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_node_names)
            # Save the frozen graph
            with open(modelParams['trainLogDir'] + '_v/model.pb', 'wb') as f:
                f.write(frozen_graph_def.SerializeToString())

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        print('QueueRunner  started')

        summaryWriter = tf.summary.FileWriter(modelParams['logDir'],
                                              sess.graph)
        summaryValiWriter = tf.summary.FileWriter(modelParams['logDir'] + '_v',
                                                  sess.graph)
        #TEST###        summaryValiWriter = tf.summary.FileWriter(modelParams['logDir']+'_test', sess.graph)

        print('Testing     started')
        durationSum = 0
        durationSumAll = 0
        prevLoss = 99999
        prevValiSumLoss = 99999
        prevaccur = 0
        prevLossStep = 0
        prevStep = 21000
        #TEST###        prevTestSumLoss = 99999
        prevStep = int(modelParams['maxSteps'] / 2)
        l = list()  # per-step durations (step 0 excluded below); mean printed at the end
        import cv2
        lossValueSum = 0
        l2regValueSum = 0

        # Count trainable parameters for reporting.
        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            #print(shape)
            #print(len(shape))
            variable_parameters = 1
            for dim in shape:
                #print(dim)
                variable_parameters *= dim.value
            #print(variable_parameters)
            total_parameters += variable_parameters
        print('-----total parameters-------- ', total_parameters)

        true_counter = 0
        for step in xrange(0, modelParams['maxSteps']):  #(0, 1000):
            startTime = time.time()
            #npfilename, npTargetP, npTargetT, npPng = sess.run([filename, targetP, targetT, pngTemp])
            #np_out, np_inp = sess.run([output_res, input_data])
            #npfilename = np_inp['filename']
            #npTargetP = np_out['clsf']
            #npTargetT = np_inp['clsf']
            #npfilename, npTargetT, npTargetP = sess.run([input_data['filename'], input_data['clsf'], output_res['clsf']])
            npfilename, npTargetT, npTargetP, npImageP = sess.run([
                input_data['filename'], input_data['clsf'], output_res['clsf'],
                output_res['deconv']
            ])
            #print(modelParams['outputDir']+str(step+10000)+'.jpg')
            # 1-mask imwrite
            cv2.imwrite(modelParams['outputDir'] + str(step + 10000) + '.jpg',
                        npImageP[0, :, :, 0])
            # 6-mask imwrite
            #for hemaps in range(modelParams['num_heatmap']):
            #    cv2.imwrite(modelParams['outputDir']+str(step+10000)+'_'+str(hemaps)+'.jpg', npImageP[0,:,:,hemaps])
            duration = time.time() - startTime
            if step != 0:
                l.append(duration)
            print(duration, step, modelParams['maxSteps'])
            #lossValueSum += l ossValue

            # Compare predicted vs. true class per sample and tally matches.
            for i in range(modelParams['activeBatchSize']):
                if np.argmax(npTargetP[i, :]) == np.argmax(npTargetT[i, :]):
                    match = True
                    true_counter += 1
                else:
                    match = False
                print(np.argmax(npTargetP[i, :]), np.argmax(npTargetT[i, :]),
                      match, '----counter:', true_counter)
                #inp_out_img = np.concatenate((np_inp['deconv'][i,:,:], np_out['deconv'][i,:,:]), axis=0)
                #cv2.imshow('in --- out', cv2.resize(inp_out_img,(350,300)))
                #cv2.waitKey(0)

            #print(npfilename)
            #print(npTargetT)
            #print(npTargetP)

            #p1 = npPng[0,:,:,0]
            #p2 = npPng[0,:,:,1]
            #p1 = (p1-np.min(p1)) / (np.max(p1)-np.min(p1))
            #p2 = (p2-np.min(p2)) / (np.max(p2)-np.min(p2))
            #cv2.imshow('img0', p1)
            #cv2.imshow('img1', p2)
            #cv2.waitKey(0)
            #print(npfilename)
            print(duration, step, modelParams['maxSteps'])
            data_output.output(str(10000 + step), npfilename, npTargetP,
                               npTargetT, **modelParams)
            # Print Progress Info
            # NOTE(review): durationSum is never incremented, so the elapsed/ETA
            # figures below always print 0 -- confirm whether it should
            # accumulate `duration`.
            if ((step % FLAGS.ProgressStepReportStep)
                    == 0) or ((step + 1) == modelParams['maxSteps']):
                print(
                    'Progress: %.2f%%, Elapsed: %.2f mins, Testing Completion in: %.2f mins --- %s'
                    %
                    ((100 * step) / modelParams['maxSteps'], durationSum / 60,
                     (((durationSum * modelParams['maxSteps']) /
                       (step + 1)) / 60) - (durationSum / 60), datetime.now()))
            #if step == 128:
            #    modelParams['phase'] = 'train'
            #
            #if step == 130:
            #    modelParams['phase'] = 'test'
        print(np.array(l).mean())
        #l0 = np.array(l)
        #l1 = np.array(l[1:-1])
        #print(np.average(l0))
        #print(np.average(l1))

        print('----- maxsteps:', modelParams['maxSteps'], '--- step:', step)
        #print('----- maxsteps:', modelParams['maxSteps'], '--- loss avg:', lossValueSum/modelParams['maxSteps'])
        #print('----- train scaled loss:', (lossValueSum/modelParams['maxSteps'])*modelParams['trainBatchSize'])
        print(modelParams['outputDir'])

        sess.close()
    tf.reset_default_graph()
Exemplo n.º 20
0
# Multiply two 2x2 matrices and accumulate (sum) the product's elements.
import tensorflow as tf
import numpy as np

SHAPE_A = [2, 2]
SHAPE_B = [2, 2]

compute_graph = tf.Graph()
with compute_graph.as_default() as g:
    # Symbolic inputs plus the two results we care about: the matrix
    # product and the sum over all of its entries.
    x = tf.placeholder(tf.float32, SHAPE_A)
    y = tf.placeholder(tf.float32, SHAPE_B)
    z = tf.matmul(x, y)
    w = tf.reduce_sum(z)

x_ = np.array([[2, 2], [2, 2]])
y_ = np.array([[3, 3], [3, 3]])

with tf.Session(graph=g) as sess:
    mat_, sum_ = sess.run([z, w], feed_dict={x: x_, y: y_})

print("Mul: ", mat_)
print("Sum: ", sum_)


Exemplo n.º 21
0
    im = np.asarray(im)
    if im.mean()>100:
        im=255-im
    result=np.append(result,im)

# Pad the batch with zero images so it matches FLAGS.train_batch_size, then
# normalize and run the restored checkpoint graph to predict digit labels.
# NOTE(review): `result` and `list` are defined earlier in this script (above
# this chunk); `list` shadows the builtin -- presumably it holds the input
# file names / expected items. Confirm against the preceding loop.
im=np.zeros((FLAGS.train_batch_size-len(list),28,28))
im = np.asarray(im)
result=np.append(result,im)

result=result.reshape(-1,28,28)  # flat buffer -> batch of 28x28 images
result=result/256  # scale pixel values into [0, 1)

checkpoint_file = FLAGS.checkpoint_file
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Rebuild the graph from the checkpoint's .meta file and restore weights.
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file),clear_devices=True)
        saver.restore(sess, checkpoint_file)

        # Fetch the input placeholder and softmax output by their graph names.
        input_x = sess.graph.get_tensor_by_name("input:0")
        out_softmax = sess.graph.get_tensor_by_name("softmax:0")

        img_out_softmax = sess.run(out_softmax, feed_dict={input_x:result})
        prediction_labels = np.argmax(img_out_softmax, axis=1)
        # Only the first len(list) entries are real images; the rest is padding.
        print("label:", prediction_labels[:len(list)])
        print(list)
Exemplo n.º 22
0
def main():
    """Run the BPNeuralNetwork self-test inside a fresh TensorFlow session."""
    session = tf.Session()
    with session:
        network = BPNeuralNetwork(session)
        network.test()
Exemplo n.º 23
0
                           normalized_embeddings,
                           transpose_b=True)

    # Merge all summaries.
    merged = tf.summary.merge_all()

    # Add variable initializer.
    init = tf.global_variables_initializer()

    # Create a saver.
    saver = tf.train.Saver()

# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
    # Open a writer to write summaries.
    writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)

    # We must initialize all variables before we use them.
    init.run()
    print('Initialized')

    average_loss = 0
    for step in xrange(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
                                                    skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # Define metadata variable.
        run_metadata = tf.RunMetadata()
Exemplo n.º 24
0
    test_model = 2
    is_training = True
    checkpoint_dir = './checkpoint'
    loss = 'cross-entropy'
    final_act = 'softmax'
    hidden_act = 'tanh'
    n_items = -1


if __name__ == '__main__':
    # Train GRU4Rec on the RSC15 session data, then evaluate on the held-out
    # split. NOTE: converted from Python-2 print statements to print() calls
    # so this snippet is valid Python 3, consistent with the rest of the file.
    print("Seve - Tez Cadey")
    ### load the data sets ###
    TRAIN = './data/rsc15_train_full.txt'
    TEST = './data/rsc15_test.txt'
    data = pd.read_csv(TRAIN, sep='\t', dtype={'ItemId': np.int64})
    valid = pd.read_csv(TEST, sep='\t', dtype={'ItemId': np.int64})
    ### define parameters ###
    args = Args()
    args.n_items = len(data['ItemId'].unique())
    if not os.path.exists(args.checkpoint_dir):
        os.mkdir(args.checkpoint_dir)
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    with tf.Session(config=gpu_config) as sess:
        gru = model.GRU4Rec(sess, args)
        gru.fit(data)
    ### evaluate on the validation set ###
    # NOTE(review): evaluation runs after the `with` block has closed the
    # session -- confirm utils.evaluate does not need the live session.
    res = utils.evaluate(gru, data, valid)
    print('Recall@20', res[0])
    print('MRR@20', res[1])
Exemplo n.º 25
0
def process(show_data_dir,
            aligned_data_dir,
            model,
            classifier_filename,
            mode,
            batch_size=90,
            image_size=160):
    """Embed aligned face images with a FaceNet model, then train or apply an SVM classifier.

    Args:
        show_data_dir: root directory whose per-class subdirectories hold display images.
        aligned_data_dir: directory of aligned face images used to compute embeddings.
        model: path of the FaceNet model passed to FaceR.load_model.
        classifier_filename: pickle file the (SVM, class_names) pair is saved to / loaded from.
        mode: 'TRAIN' fits and saves the SVM; 'CLASSIFY' predicts and prints a JSON top-4.
        batch_size: number of images embedded per forward pass.
        image_size: image edge length passed to FaceR.load_data.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # np.random.seed(seed=args.seed)

            dataset = FaceR.get_dataset(aligned_data_dir)

            paths, labels = FaceR.get_image_paths_and_labels(dataset)

            print('Number of classes: %d' % len(dataset), file=sys.stderr)
            print('Number of images: %d' % len(paths), file=sys.stderr)

            # Load the model
            print('Loading feature extraction model', file=sys.stderr)
            FaceR.load_model(model)
            # FaceR.load_model('/Users/lzh/models/facenet/20180408-102900/20180408-102900.pb')

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings, one batch at a time.
            print('Calculating features for images', file=sys.stderr)
            nrof_images = len(paths)
            nrof_batches_per_ecoch = int(
                math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_ecoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = FaceR.load_data(paths_batch, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(classifier_filename)

            # Train the classifer
            if (mode == 'TRAIN'):

                # Use SVM as the algorithm of the classifier
                clf = svm.SVC(kernel='linear', probability=True)

                # Fit the features and labels to SVM
                clf.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((clf, class_names), outfile)
                print('Saved classifier model to file "%s"' %
                      classifier_filename_exp,
                      file=sys.stderr)

            # Classify images
            elif (mode == 'CLASSIFY'):

                print('Classify images', file=sys.stderr)

                # Load the trained classifier
                # NOTE(review): pickle.load on an attacker-controlled file can
                # execute arbitrary code -- only load trusted classifier files.
                with open(classifier_filename_exp, 'rb') as infile:
                    (clf, class_names) = pickle.load(infile)

                print('Loaded classifier from file "%s"' %
                      classifier_filename_exp,
                      file=sys.stderr)

                # Predict the new image by using its face features
                predictions = clf.predict_proba(emb_array)

                # NOTE(review): the loop always zips predictions[0] and then
                # keeps only probs[0], so only the first image's probabilities
                # are used -- confirm this is intentional (single-image input?).
                probs = []
                for i in range(len(predictions)):
                    probs.append(list(zip(predictions[0], clf.classes_)))
                probs = probs[0]

                # Pick out the top 4 probabilities with their label
                quick_sort(probs, 0, len(probs) - 1)

                # Use tmp to store the dic of the result
                tmp = []
                for i in range(4):
                    pro = round(probs[i][0] * 100, 1)

                    # Get the data directory containing target image
                    target_data_dir = os.path.join(
                        show_data_dir,
                        class_names[probs[i][1]].replace(' ', '_'))
                    # Get the target image path
                    images = os.listdir(target_data_dir)

                    # NOTE(review): `image_list` is a module-level name not
                    # visible here -- presumably the allowed display images.
                    local_paths = [os.path.join(target_data_dir, img) for img in images \
                                   if img in image_list]

                    # `dict` shadows the builtin; kept as-is for compatibility.
                    dict = {
                        'Name': class_names[probs[i][1]],
                        'probability': pro,
                        'Path': local_paths[0]
                    }
                    tmp.append(dict)

                res = json.dumps(tmp, ensure_ascii=False)
                print(res, end='')
Exemplo n.º 26
0
def predict_unseen_data():
	"""Predict labels for a test file using a SavedModel exported to ./SavedModelB.

	Reads the trained-params directory and test file from sys.argv, restores
	the serving signature's input/output tensors, predicts one batch at a
	time, writes predictions_all.csv to a fresh predicted_results_* directory,
	and logs accuracy when ground-truth labels are available.
	"""
	trained_dir = sys.argv[1]
	if not trained_dir.endswith('/'):
		trained_dir += '/'
	test_file = sys.argv[2]

	labels = load_trained_params(trained_dir)
	x_, y_, df,mysent = load_test_data(test_file, labels)
	my = x_

	x_test, y_test = np.asarray(x_), None
	if y_ is not None:
		y_test = np.asarray(y_)


	# Derive the output directory from the trained-dir timestamp; recreate it.
	timestamp = trained_dir.split('/')[-2].split('_')[-1]
	predicted_dir = './predicted_results_' + timestamp + '/'
	if os.path.exists(predicted_dir):
		shutil.rmtree(predicted_dir)
	os.makedirs(predicted_dir)


	with tf.Graph().as_default():
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)
		# the signature of the graph
		signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
		export_path =  './SavedModelB'
		# making a prediction from the file .pb
		meta_graph_def = tf.saved_model.loader.load(
			sess,
			[tf.saved_model.tag_constants.SERVING],
			export_path)
		signature = meta_graph_def.signature_def

		# getting the tensors by the names recorded in the serving signature
		to_input_x = signature[signature_key].inputs['to_input_x'].name


		to_input_x =  sess.graph.get_tensor_by_name(to_input_x)


		to_predictions = signature[signature_key].outputs['to_predictions'].name
		to_predictions =  sess.graph.get_tensor_by_name(to_predictions)

		with sess.as_default():
			# NOTE(review): real_len is only referenced from a commented-out
			# print below -- apparently dead code kept for debugging.
			def real_len(batches):
				return [np.ceil(np.argmin(batch + [0]) * 1.0 / 4) for batch in batches]

			def predict_step(x_batch):
				# Run the restored graph on one batch and return its predictions.
				#print(x_batch.tolist())
				print(x_batch)

				#print(real_len(x_batch))


				feed_dict = {
					to_input_x: x_batch,
				}
				predictions = sess.run([to_predictions], feed_dict)
				print('----------------------------------------')

				return predictions

			batches = data_helper.batch_iter(list(x_test), 1, 1, shuffle=False)
			predictions, predict_labels = [], []
			i = 0
			for x_batch in batches:
				if len(x_batch) != 0:
					batch_predictions = predict_step(x_batch)[0]
					for batch_prediction in batch_predictions:
						print('Prediction is :',batch_prediction,labels[batch_prediction],' :: ',np.argmax(y_test[i], axis=0),labels[np.argmax(y_test[i], axis=0)])# here we print the result of the prediction
						i = i+1
						predictions.append(batch_prediction)
						predict_labels.append(labels[batch_prediction])

			print('fffffff')
			# Save the predictions back to file
			df['NEW_PREDICTED'] = predict_labels
			columns = sorted(df.columns, reverse=True)
			df.to_csv(predicted_dir + 'predictions_all.csv', index=False, columns=columns, sep='|')

			if y_test is not None:
				y_test = np.array(np.argmax(y_test, axis=1))
				accuracy = sum(np.array(predictions) == y_test) / float(len(y_test))
				logging.critical('The prediction accuracy is: {}'.format(accuracy))

			logging.critical('Prediction is complete, all files have been saved: {}'.format(predicted_dir))
Exemplo n.º 27
0
    # sampled_proxy_losses = [
    #     get_loss(real_utility.output, proxy.output)
    #     for proxy in sampled_proxies
    # ]
    # sampled_proxy_opts = [
    #     (lambda opt_loss:
    #         (opt_loss, optimizer.minimize(opt_loss))
    #     )(
    #         opt_loss=get_loss(proxy.output, desired_output, stop_gradient=False),
    #     )
    #     for proxy in sampled_proxies
    # ]


    # Do a first pass of training real_utility on our random data.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    do_optimization(
        sess,
        "RealUtilityTraining",
        real_loss,
        real_opt,
        REAL_TRAINING_STEPS,
        {
            real_utility.input: random_xs,
            desired_output: random_ys,
        },
    )

    # sess.run(
def main():
    """Embed every detection image with a trained re-id network.

    Loads the args saved by the original training run, builds a tf.data
    pipeline over the detections listed in a .mat file (optionally applying
    flip / crop test-time augmentation), restores the latest (or requested)
    checkpoint, and writes the embeddings — plus per-augmentation variants
    and the augmentation metadata — into an HDF5 file (`args.filename`).
    """
    # Verify that parameters are set correctly.
    args = parser.parse_args()

    #os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
    #gpu_options = tf.GPUOptions(allow_growth=True)
    #sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))

    # Load the args from the original experiment.
    # -- args_file in 'experiments/demo_weighted_triplet/args.json'
    args_file = os.path.join(args.experiment_root, 'args.json')

    # -- Load the args.json file
    if os.path.isfile(args_file):
        if not args.quiet:
            print('Loading args from {}.'.format(args_file))
        with open(args_file, 'r') as f:
            args_resumed = json.load(f)

        # Add arguments from training. setdefault keeps any value the user
        # passed explicitly for evaluation, so training args never override.
        for key, value in args_resumed.items():
            args.__dict__.setdefault(key, value)

        # A couple special-cases and sanity checks
        if (args_resumed['crop_augment']) == (args.crop_augment is None):
            print('WARNING: crop augmentation differs between training and '
                  'evaluation.')
        args.image_root = args.image_root or args_resumed['image_root']
    else:
        raise IOError('`args.json` could not be found in: {}'.format(args_file))

    # Check a proper aggregator is provided if augmentation is used.
    if args.flip_augment or args.crop_augment == 'five':
        if args.aggregator is None:
            # BUGFIX: the two adjacent literals were joined without a space
            # ("...no aggregatorwas specified."); added the missing separator.
            print('ERROR: Test time augmentation is performed but no aggregator '
                  'was specified.')
            exit(1)
    else:
        if args.aggregator is not None:
            print('ERROR: No test time augmentation that needs aggregating is '
                  'performed but an aggregator was specified.')
            exit(1)

    if not args.quiet:
        print('Evaluating using the following parameters:')
        for key, value in sorted(vars(args).items()):
            print('{}: {}'.format(key, value))

    # Load the data from the CSV file.

    net_input_size = (args.net_input_height, args.net_input_width)
    pre_crop_size = (args.pre_crop_height, args.pre_crop_width)

    # Load detections
    matfile = sio.loadmat(args.detections_path)
    detections = matfile['detections']
    num_detections = detections.shape[0]

    # Setup a tf Dataset generator
    # -- generator will load the detection img from duke dataset and resize the detection img  to be the net_input size.
    generator = functools.partial(detections_generator, args.dataset_path, detections, net_input_size[0], net_input_size[1])
    dataset = tf.data.Dataset.from_generator(generator, tf.float32, tf.TensorShape([net_input_size[0], net_input_size[1], 3]))

    # `modifiers` tracks, per detection, which augmented variants are produced;
    # its length is the augmentation multiplier used when sizing emb_storage.
    modifiers = ['original']
    if args.flip_augment:
        dataset = dataset.map(flip_augment)
        dataset = dataset.apply(tf.contrib.data.unbatch())
        modifiers = [o + m for m in ['', '_flip'] for o in modifiers]

    # NOTE(review): the generator yields a single image tensor, but the crop
    # lambdas below unpack (im, fid, pid) — this would fail if a crop_augment
    # mode is actually selected; confirm against the intended dataset schema.
    if args.crop_augment == 'center':
        dataset = dataset.map(lambda im, fid, pid:
            (five_crops(im, net_input_size)[0], fid, pid))
        modifiers = [o + '_center' for o in modifiers]
    elif args.crop_augment == 'five':
        dataset = dataset.map(lambda im, fid, pid:
            (tf.stack(five_crops(im, net_input_size)), [fid]*5, [pid]*5))
        dataset = dataset.apply(tf.contrib.data.unbatch())
        modifiers = [o + m for o in modifiers for m in [
            '_center', '_top_left', '_top_right', '_bottom_left', '_bottom_right']]
    elif args.crop_augment == 'avgpool':
        modifiers = [o + '_avgpool' for o in modifiers]
    else:
        modifiers = [o + '_resize' for o in modifiers]


    # Group it back into PK batches.
    dataset = dataset.batch(args.batch_size)

    # Overlap producing and consuming.
    dataset = dataset.prefetch(args.batch_size)
    images = dataset.make_one_shot_iterator().get_next()

    # Create the model and an embedding head.
    model = import_module('nets.' + args.model_name)
    head = import_module('heads.' + args.head_name)

    endpoints, body_prefix = model.endpoints(images, is_training=False)
    with tf.name_scope('head'):
        endpoints = head.head(endpoints, args.embedding_dim, is_training=False)

    with h5py.File(args.filename, 'w') as f_out, tf.Session() as sess:
        # Initialize the network/load the checkpoint.
        if args.checkpoint is None:
            checkpoint = tf.train.latest_checkpoint(args.experiment_root)
        else:
            checkpoint = os.path.join(args.experiment_root, args.checkpoint)
        if not args.quiet:
            print('Restoring from checkpoint: {}'.format(checkpoint))
        tf.train.Saver().restore(sess, checkpoint)

        # Go ahead and embed the whole dataset, with all augmented versions too.
        emb_storage = np.zeros(
            (num_detections * len(modifiers), args.embedding_dim), np.float32)

        # Run until the one-shot iterator is exhausted; OutOfRangeError is the
        # normal end-of-dataset signal, not a failure.
        for start_idx in count(step=args.batch_size):
            try:
                emb = sess.run(endpoints['emb'])
                print('\rEmbedded batch {}-{}/{}'.format(
                        start_idx, start_idx + len(emb), len(emb_storage)),
                    flush=True, end='')
                emb_storage[start_idx:start_idx + len(emb)] = emb
            except tf.errors.OutOfRangeError:
                break  # This just indicates the end of the dataset.

        if not args.quiet:
            print("Done with embedding, aggregating augmentations...", flush=True)

        if len(modifiers) > 1:
            # Pull out the augmentations into a separate first dimension.
            # BUGFIX: the original used len(data_fids), a name that does not
            # exist in this function; the row count per modifier is the number
            # of detections (emb_storage was allocated with
            # num_detections * len(modifiers) rows above).
            emb_storage = emb_storage.reshape(num_detections, len(modifiers), -1)
            emb_storage = emb_storage.transpose((1,0,2))  # (Aug,FID,128D)

            # Store the embedding of all individual variants too.
            emb_dataset = f_out.create_dataset('emb_aug', data=emb_storage)

            # Aggregate according to the specified parameter.
            emb_storage = AGGREGATORS[args.aggregator](emb_storage)

        # Store the final embeddings.
        emb_dataset = f_out.create_dataset('emb', data=emb_storage)

        # Store information about the produced augmentation and in case no crop
        # augmentation was used, if the images are resized or avg pooled.
        f_out.create_dataset('augmentation_types', data=np.asarray(modifiers, dtype='|S'))
Exemplo n.º 29
0
#########################
# Optional GPU-memory cap: when `bubbles` is toggled on, configure the Keras
# TensorFlow backend session to use at most 60% of GPU memory instead of
# letting TF grab the whole card. Disabled by default.
# NOTE(review): presumably "bubbles" names a specific shared machine — confirm.
bubbles = False
if bubbles:
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    # Limit this process to 60% of the GPU's memory.
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    set_session(tf.Session(config=config))
#########################


#imports
import cv2
import time
import scipy as sp
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from functools import partial
import matplotlib.pyplot as plt

import tensorflow as tf
import keras
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from keras.activations import elu
from keras.optimizers import Adam
from keras.models import Sequential
Exemplo n.º 30
0
    def __init__(
        self,
        n_actions,
        n_features,
        learning_rate=0.01,
        reward_decay=0.9,
        e_greedy=0.9,
        replace_target_iter=320,
        memory_size=100000,
        batch_size=64,
        e_greedy_increment=None,
        output_graph=False,
        split_size=12,
        window_size=20,
    ):
        """Set up the DQN agent.

        Stores the hyper-parameters, allocates the replay memory, builds the
        eval/target networks plus the op that copies eval weights into the
        target network, and opens a TF session with all variables initialized.
        """
        # Core hyper-parameters.
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.split_size = split_size
        self.window_size = window_size
        self.channels = 9
        self.epsilon_increment = e_greedy_increment
        # With an increment schedule, start greedy-exploration from 0 and grow;
        # otherwise use the fixed maximum epsilon from the start.
        if e_greedy_increment is not None:
            self.epsilon = 0
        else:
            self.epsilon = self.epsilon_max
        self.lr2 = 0

        # Dropout keep-probabilities for the network's layer groups.
        self.conv_keep_prob = 0.9
        self.gru_keep_prob = 0.5
        self.dense_keep_prob = 0.5

        # total learning step
        self.learn_step_counter = 0

        # Replay buffer: one row per transition [s, a, r, s_].
        self.memory = np.zeros((self.memory_size, 2 * n_features + 2))

        # Exponentially decayed learning rate driven by a global step counter.
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = tf.train.exponential_decay(
            self.lr, self.global_step, 100000, 0.96, staircase=True)

        # Build both networks: the eval (online) net and the target net.
        self._build_net()

        target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope='target_net')
        online_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope='eval_net')

        # Op that hard-copies every eval-net variable into the target net.
        with tf.variable_scope('soft_replacement'):
            self.target_replace_op = [
                tf.assign(target, source)
                for target, source in zip(target_vars, online_vars)
            ]

        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())
        # History of training losses, appended to by the learn step.
        self.cost_his = []