Example #1
def main(_):
    hooks = [
        tf.train.LoggingTensorHook(['train_accuracy', 'train_loss'],
                                   every_n_iter=10)
    ]

    NUM_GPUS = 3
    strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=NUM_GPUS)
    config = tf.estimator.RunConfig(train_distribute=strategy)
    #estimator = tf.keras.estimator.model_to_estimator(model, config=config)

    mnist_classifier = tf.estimator.Estimator(model_fn=model_function,
                                              model_dir=FLAGS.model_dir,
                                              config=config)

    timer('TRAIN_AND_EVALUATE')

    for _ in range(FLAGS.num_epochs):
        mnist_classifier.train(
            input_fn=train_data,
            hooks=hooks,
        )
        mnist_classifier.evaluate(input_fn=eval_data)

    timer()
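
A note on the `timer` helper used here: it is project-specific and not shown in the example. One plausible reading of the call sites, where `timer('TRAIN_AND_EVALUATE')` opens a named interval and a bare `timer()` closes it and prints the elapsed time, is a small module-level helper like this hypothetical sketch (the real implementation may differ):

import time

_intervals = []

def timer(label=None):
    # Hypothetical label-style timer: timer('name') opens an interval,
    # a bare timer() closes the most recent one and prints the elapsed time.
    if label is not None:
        _intervals.append((label, time.perf_counter()))
    elif _intervals:
        name, start = _intervals.pop()
        print('%s: %.3f s' % (name, time.perf_counter() - start))
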
Example #2
def calibrate():
    data = parse_data()

    end = timer()
    homographies = compute_homography(data)
    end("Homography Estimation")
    print "homographies"
    print homographies

    end = timer()
    intrinsics = get_camera_intrinsics(homographies)
    end("Intrinsics")

    print "intrinsics"
    print intrinsics

    end = timer()
    extrinsics = get_camera_extrinsics(intrinsics, homographies)
    end("Extrinsics")

    print "extrinsics"
    print extrinsics

    end = timer()
    distortion = estimate_lens_distortion(intrinsics, extrinsics, data["real"],
                                          data["sensed"])
    end("Distortion")

    return
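
This example uses a different, closure-style pattern: `end = timer()` captures a start time and `end("Homography Estimation")` reports the elapsed interval. A minimal sketch of such a helper, inferred from the call sites rather than taken from the project:

import time

def timer():
    # Hypothetical closure-style timer: returns an end(label) function that
    # prints the time elapsed since timer() was called.
    start = time.perf_counter()

    def end(label):
        print('%s took %.3f s' % (label, time.perf_counter() - start))

    return end
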
Example #3
def calibrate():
    data = prepare_data()
    #print(data)

    end = timer()
    homographies = compute_homography(data)
    end("Homography Estimation")
    print("homographies")
    print(homographies)

    end = timer()
    intrinsics = get_camera_intrinsics(homographies)
    end("Intrinsics")

    print("intrinsics")
    print(intrinsics)

    end = timer()
    extrinsics = get_camera_extrinsics(intrinsics, homographies)
    end("Extrinsics")

    print("extrinsics")
    print(extrinsics)

    return
Example #4
def compute_homography(data):
    end = timer()
    real = data['real']
    es = []

    for i in range(0, len(data['sensed'])):
        sensed = data['sensed'][i]
        estimated = estimate_homography(real, sensed)
        es.append(estimated)

    end("compute_homography")
    return np.array(es)
Example #5
    def rSort(self):
        for i in range(1000, 101000 + 1, 10000):
            # Radix sort
            current_file_path = self.current_file_path(i)
            init_array = self.get_correct_array(current_file_path)

            start_time = timer()
            number_of_operations = radixsort.radix_sort(init_array)
            end_time = timer()

            algorithm_time = get_difference(start_time, end_time)
            number_of_elements = i

            self.save_to_file("radixsort", number_of_elements,
                              number_of_operations, algorithm_time)
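
Here `timer()` is used as a plain clock: two timestamps are taken and their difference is computed by `get_difference`. An alias of `timeit`'s default timer fits this call pattern, with `get_difference` as a simple subtraction; both definitions below are assumptions made only to keep the snippet self-contained:

from timeit import default_timer as timer

def get_difference(start_time, end_time):
    # elapsed seconds between two timer() readings (hypothetical helper)
    return end_time - start_time
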
Example #6
def compute_homography(data):
    end = timer()

    real = data['real']

    refined_homographies = []

    for i in range(0, len(data['sensed'])):
        sensed = data['sensed'][i]
        estimated = estimate_homography(real, sensed)
        end_refine = timer()  # separate timer, so the outer one is not overwritten
        refined = refine_homography(estimated, sensed, real)
        refined = refined / refined[-1]
        end_refine("refine_homography")
        refined_homographies.append(refined)  # append the refined estimate, not the raw one

    end("compute_homography")
    return np.array(refined_homographies)
Example #7
def get_normalisation_matrix(flattened_corners):
    end = timer()

    avg_x = flattened_corners[:, 0].mean()
    avg_y = flattened_corners[:, 1].mean()

    s_x = np.sqrt(2 / flattened_corners[:, 0].std())  # column-wise std, matching the means above
    s_y = np.sqrt(2 / flattened_corners[:, 1].std())

    end("get_normalisation_matrix")
    return np.matrix([[s_x, 0, -s_x * avg_x], [0, s_y, -s_y * avg_y],
                      [0, 0, 1]])
Example #8
def make_bottleneck_data(src_dir, shape, ratio):

    filenames_data = make_filenames_list_from_subdir(src_dir=src_dir,
                                                     shape=shape,
                                                     ratio=ratio)

    dataset = make_tf_dataset(filenames_data)

    timer('make_bottleneck')

    bottleneck_data = make_bottleneck_with_tf(dataset, shape=shape)  # use the argument, not the global SHAPE

    bottleneck_data['id_label'] = filenames_data['id_label']
    bottleneck_data['label_id'] = filenames_data['label_id']
    bottleneck_data['num_classes'] = filenames_data['num_classes']

    print('Train size:', len(bottleneck_data['train']['images']))
    print('Valid size:', len(bottleneck_data['valid']['images']))
    print('Test size:', len(bottleneck_data['test']['images']))

    return bottleneck_data
Example #9
def estimate_homography(first, second):
    end = timer()

    first_normalisation_matrix = get_normalisation_matrix(first)
    second_normalisation_matrix = get_normalisation_matrix(second)

    M = []

    for j in range(0, int(first.size / 2)):
        homogeneous_first = np.array([
            first[j][0],
            first[j][1],
            1
        ])

        homogeneous_second = np.array([
            second[j][0],
            second[j][1],
            1
        ])

        pr_1 = np.dot(first_normalisation_matrix, homogeneous_first)

        pr_2 = np.dot(second_normalisation_matrix, homogeneous_second)

        M.append(np.array([
            pr_1.item(0), pr_1.item(1), 1,
            0, 0, 0,
            -pr_1.item(0)*pr_2.item(0), -pr_1.item(1)*pr_2.item(0), -pr_2.item(0)
        ]))

        M.append(np.array([
            0, 0, 0, pr_1.item(0), pr_1.item(1),
            1, -pr_1.item(0)*pr_2.item(1), -pr_1.item(1)*pr_2.item(1), -pr_2.item(1)
        ]))

    # two rows per point correspondence; -1 avoids hardcoding the point count
    U, S, Vh = np.linalg.svd(np.array(M).reshape((-1, 9)))

    L = Vh[-1]

    H = L.reshape(3, 3)

    denormalised = np.dot(
        np.dot(
            np.linalg.inv(first_normalisation_matrix),
            H
        ),
        second_normalisation_matrix
    )

    end("estimate_homography")
    return denormalised / denormalised[-1, -1]
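
A hypothetical smoke test for `estimate_homography`, assuming it, `get_normalisation_matrix`, and a closure-style `timer` are in scope: project a 16x16 grid of points through a known homography and print the mean transfer error in both directions, since the (de)normalisation conventions above determine which way the returned matrix maps.

import numpy as np

true_H = np.array([[1.0, 0.1, 5.0], [0.0, 1.2, -3.0], [1e-4, 0.0, 1.0]])
xs, ys = np.meshgrid(np.arange(16.0), np.arange(16.0))
first = np.stack([xs.ravel(), ys.ravel()], axis=1)               # 256 'real' points
proj = np.column_stack([first, np.ones(len(first))]) @ true_H.T
second = proj[:, :2] / proj[:, 2:]                               # 'sensed' points

H = np.asarray(estimate_homography(first, second))
for name, src, dst in [('first->second', first, second), ('second->first', second, first)]:
    mapped = np.column_stack([src, np.ones(len(src))]) @ H.T
    print(name, 'mean error:', np.abs(mapped[:, :2] / mapped[:, 2:] - dst).mean())
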
Example #10
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write(
                'iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n'
            )
            self.log_msg.write(
                '-------------------------------------------------------------------------------------\n'
            )

            fit_iteration = self.fit_iteration_gen(max_iter)
            for iter in range(max_iter):
                self.n_iter = iter

                next(fit_iteration)

                if (iter +
                        1) % self.ckpt_save_step == 0 or self.fast_test_mode:
                    self.save_weights(self.train_target)
                    self.save_progress()

                if (iter + 1) % 200 == 0:
                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), 200))
                self.n_global_step += 1

            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))

            self.save_weights(self.train_target)
            self.save_progress()
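
The trainer above only touches `time_it` through `time_it.time_diff_per_n_loops()` and `time_it.total_time()`. A minimal sketch of a class with that interface, inferred from the call sites (the project's actual `timer` utility is not shown here):

import time

class timer:
    # Hypothetical loop timer matching the calls above.

    def __init__(self):
        self._start = time.perf_counter()
        self._last = self._start

    def time_diff_per_n_loops(self):
        # seconds since the previous call (or since construction)
        now = time.perf_counter()
        diff = now - self._last
        self._last = now
        return diff

    def total_time(self):
        # seconds since construction
        return time.perf_counter() - self._start
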
Example #11
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write('iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n')
            self.log_msg.write('-------------------------------------------------------------------------------------\n')

            fit_iteration = self.fit_iteration_gen(max_iter)
            for iter in range(max_iter):
                self.n_iter = iter

                next(fit_iteration)

                if (iter + 1) % self.ckpt_save_step == 0 or self.fast_test_mode:
                    self.save_weights(self.train_target)
                    self.save_progress()

                if (iter + 1) % 200 == 0:
                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), 200))
                self.n_global_step += 1


            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))

            self.save_weights(self.train_target)
            self.save_progress()
Example #12
def compare_questions(df_ques, dfcore, min_number_results=5, tests=None):
    """
	Parameters
	----------
	df : DataFrame, where column is question ids, and rows is result ids:
	  2048  2049  2050  2051  2052  2053  2054  2055  2056  2057  ...   2038 
1537    50    50    50    50    50    50    50    50    50    50  ...     50   
1538     1    50     0     1    50     0     1    50     0     0  ...      1   

	min_number_results : int, minimal number of common result for two questions,
		when we calculate correlation. If num results < this value, then we claim, that
		there is no correlation.

	Returns
	-------
	corr: from 0 to 1:
			= 1 -- positive corr.
			= 0 -- negativ corr.
			= 0.5 - no corr.
	phi: from -1 to 1.
	"""

    logging.info('compare questions')

    #df = df_ques # for short
    ids = set(df_ques.columns)
    ids_from_tests = get_questions_of_tests(dfcore, tests)
    ids = ids & ids_from_tests  #! select only those questions that are in given tests

    num = len(ids)

    #corr = np.zeros(shape=(num, num), dtype=float) # correlation based on similar answer
    #phi = np.zeros(shape=(num, num), dtype=float)  # phi-value
    #ratio = np.zeros(shape=(num, num), dtype=float)
    #num_common_results = np.zeros(shape=(num, num), dtype=int) # the number of common results for two question

    question_answers_dict = get_question_answers_dict(dfcore)
    print(question_answers_dict)

    ques_labels, ques_labels_dict = database_connect.load_question_labels(
        tests=tests)  # to get test_id for a question
    test_id_by_ques_id = {
        ques_id: ques_labels_dict[ques_id]['test']
        for ques_id in ids
    }

    #coeff_dict = dict()
    #abs_coeff_dict = dict()
    abs_test_coeff_dict = dict()
    num_pair_dict = dict()
    total_count_answer_pair = 0  # count number of all pair of answers
    #total_answers_test_number = 0

    #df_matrix_corr = pd.DataFrame()

    # calculate the number of rows in new table
    num_rows = 0
    for index1, id1 in enumerate(ids):
        #print('{0}/{1}, id={2}'.format(1+index1, len(ids), id1))
        for index2, id2 in enumerate(ids):
            if id1 == id2: continue  # because the self-correlation is always 1
            num_ans1 = len(question_answers_dict[id1])  # we get it from dfcore
            num_ans2 = len(question_answers_dict[id2])
            num_rows += num_ans1 * num_ans2

    print('num_rows =', num_rows)
    #sys.exit(0)

    #matrix_corr = np.array(shape=)
    # prepare an empty dataframe for data store

    #df_matrix_corr = pd.DataFrame(0, index=np.arange(num_rows),\
    #columns=['test_id_1', 'test_id_2', 'question_id_1', 'question_id_2',\
    #	columns=['question_id_1', 'question_id_2',\
    #	'answer_id_1', 'answer_id_2'])

    matrix_corr1 = np.zeros(shape=(num_rows, 6), dtype=np.int32)
    matrix_corr2 = np.empty(shape=(num_rows, 3), dtype=np.float32)
    matrix_corr2.fill(np.nan)

    #df_matrix_corr['coeff'] = np.nan
    #df_matrix_corr['abs_coeff'] = np.nan
    #df_matrix_corr['abs_test_coeff'] = np.nan
    #columns_dict = { key:i for i, key in enumerate(df_matrix_corr.columns)}
    #print(columns_dict)
    #print('df_matrix_corr:')
    #print(df_matrix_corr.info())

    #columns_dict = {'test_id_1':0, 'test_id_2':1, 'question_id_1':2, 'question_id_2':3,\
    #	'answer_id_1':4, 'answer_id_2':5}

    index = 0

    timer('start')

    USE_PD = False

    for index1, id1 in enumerate(ids):
        #print('{0}/{1}, id={2}'.format(1+index1, len(ids), id1))
        timer('{0}/{1}, id={2}'.format(1 + index1, len(ids), id1))

        for index2, id2 in enumerate(ids):

            if id1 == id2: continue  # because the self-correlation is always 1
            #print('  id2={0}'.format(id2))

            if USE_PD:
                df = df_ques[[id1,
                              id2]]  # select data only for given two questions
                df = df[(df[id1] != NAN) &
                        (df[id2] != NAN)]  # and find intersection of results
                num = len(df)  # total number of a-b answer pairs
            else:
                values = df_ques[[id1, id2]].values
                arr = values[(values[:, 0] != NAN) & (values[:, 1] != NAN)]
                num = arr.shape[0]

            total_count_answer_pair += num

            #if num < min_number_results:
            #	logging.debug('num = {0} < min_number_results. continue'.format(num))
            #	continue

            answers1 = question_answers_dict[id1]  # we get it from dfcore
            answers2 = question_answers_dict[id2]
            #answers1 = set(df[id1]) # we get it from df
            #answers2 = set(df[id2])

            #coeff_ans_dict = dict()
            #abs_coeff_ans_dict = dict()
            #abs_test_coeff_ans_dict = dict()

            for ans1 in answers1:

                if USE_PD:
                    num_ans1 = len(
                        df[df[id1] ==
                           ans1])  # N(A1) = P(A1) * num --- count of answer1
                else:
                    # N(A1) = P(A1) * num --- count of answer1
                    num_ans1 = np.count_nonzero(arr[:, 0] == ans1)

                for ans2 in answers2:

                    if USE_PD:
                        num_ans1_ans2 = len(
                            df[(df[id1] == ans1) & (df[id2] == ans2)]
                        )  # N(A1,A2) = count of ans1 and ans2 together
                        num_ans2 = len(
                            df[df[id2] == ans2]
                        )  # N(A2) = P(A2) * num --- count of answer2
                    else:
                        num_ans1_ans2 = np.count_nonzero((arr[:, 0] == ans1)
                                                         & (arr[:, 1] == ans2))
                        num_ans2 = np.count_nonzero(arr[:, 1] == ans2)

                    if num_ans1_ans2 > num_ans1:
                        print('num_ans1_ans2=', num_ans1_ans2)
                        print('num_ans1=', num_ans1)

                    assert num_ans1_ans2 <= num
                    assert num_ans1_ans2 <= num_ans1
                    assert num > 0

                    P_A1_A2 = num_ans1_ans2 / num  # = P(A1,A2)
                    P_A1_A2 = round(min(P_A1_A2, 1.0), 3)

                    if DEBUG:
                        print('id1, id2 = ', (id1, id2))
                        print('ans1, ans2 = ', (ans1, ans2))
                        print('num = ', num)
                        print('num_ans1_ans2 =', num_ans1_ans2)
                        print('P_A1_A2 =', P_A1_A2)

                    if num_ans1 > 0:
                        P_A2_cond_A1 = num_ans1_ans2 / num_ans1  # = P(A2|A1) = P(A1,A2) / P(A1)
                        P_A2_cond_A1 = round(min(P_A2_cond_A1, 1.0), 3)
                    else:
                        P_A2_cond_A1 = np.nan

                    #coeff_ans_dict[(ans1,ans2)] = P_A2_cond_A1
                    #abs_coeff_ans_dict[(ans1,ans2)] = P_A1_A2

                    if (ans1, ans2) in num_pair_dict:
                        num_pair_dict[(ans1, ans2)] += num_ans1_ans2
                        #print('add ', num_ans1_ans2, ' for ', (ans1,ans2))
                    else:
                        num_pair_dict[(ans1, ans2)] = num_ans1_ans2

                    if DEBUG:
                        print('{0}-{1} ({2},{3}): {4:.4f} | {5:.4f} | {6}'.format(
                            id1, id2, ans1, ans2, P_A2_cond_A1, P_A1_A2,
                            num_pair_dict[(ans1, ans2)]))

                    #num_ans2 = len(df[df[id2]==ans2]) # count of answer2

                    #print('index =', index)
                    assert index < num_rows

                    matrix_corr1[index, 0] = test_id_by_ques_id[id1]
                    matrix_corr1[index, 1] = test_id_by_ques_id[id2]
                    matrix_corr1[index, 2] = id1
                    matrix_corr1[index, 3] = id2
                    matrix_corr1[index, 4] = ans1
                    matrix_corr1[index, 5] = ans2
                    matrix_corr2[index, 0] = P_A2_cond_A1  # coeff
                    matrix_corr2[index, 1] = P_A1_A2  # abs_coeff
                    #matrix_corr2[index, 2] = num_ans2     # abs_test_coeff - MY
                    matrix_corr2[index, 2] = P_A1_A2  # abs_test_coeff - how in
                    #df_matrix_corr.iloc[index, columns_dict['coeff']] = P_A2_cond_A1
                    #df_matrix_corr.iloc[index, columns_dict['abs_coeff']] = P_A1_A2
                    index += 1

                    if DEBUG and id1 == 1 and id2 == 2 and ans1 == 1 and ans2 == 8:
                        print(matrix_corr1[index, :])
                        print(matrix_corr2[index, :])
                        sys.exit()

            #coeff_dict[(id1,id2)] = coeff_ans_dict
            #abs_coeff_dict[(id1,id2)] = abs_coeff_ans_dict
    """
	all_answers = set(dfcore['answer_id'])		
	for index1, ans1 in enumerate(all_answers):
		print('{0}/{1}, id={2}'.format(1+index1, len(ids), id1))		
		for index2, ans2 in enumerate(all_answers):			
			for pair in num_pair_dict:
				abs_test_coeff_dict[pair] = num_pair_dict[pair] / count_pair
	"""

    sum_num_pair_dict = sum(num_pair_dict.values())
    assert total_count_answer_pair == sum_num_pair_dict
    print('total_count_answer_pair =', total_count_answer_pair)
    print('sum_num_pair_dict =', sum_num_pair_dict)

    #for i in range(num_rows):
    #	for pair in num_pair_dict:
    #		if (matrix_corr1[i,4] == pair[0]) and (matrix_corr1[i,5] == pair[1]):
    #			matrix_corr2[i,2] = num_pair_dict[pair]
    """
	# 1-st way:
	for pair in num_pair_dict:
		matrix_corr2[((matrix_corr1[:,4] == pair[0]) & (matrix_corr1[:,5] == pair[1])), 2] = num_pair_dict[pair]

	matrix_corr2[:,2] = matrix_corr2[:,2] / sum_num_pair_dict
	"""

    matrix_corr2[:, 2] = matrix_corr2[:, 2] * MULT_abs_test_coeff / total_count_answer_pair
    matrix_corr2[:, 2] = np.where(matrix_corr2[:, 2] <= 1.0, matrix_corr2[:, 2], 1.0)
    matrix_corr2[:, 2] = np.round(matrix_corr2[:, 2], 7)
    #matrix_corr2[:,0] = np.where(matrix_corr2[:,0] <= 1.0, matrix_corr2[:,0], 1.0)
    #matrix_corr2[:,1] = np.where(matrix_corr2[:,1] <= 1.0, matrix_corr2[:,1], 1.0)

    # fill tests id.
    df_matrix_corr = pd.DataFrame(matrix_corr1,
                                  columns=['test_id_1', 'test_id_2',
                                           'question_id_1', 'question_id_2',
                                           'answer_id_1', 'answer_id_2'])
    df_matrix_corr['coeff'] = matrix_corr2[:, 0]
    df_matrix_corr['abs_coeff'] = matrix_corr2[:, 1]
    df_matrix_corr['abs_test_coeff'] = matrix_corr2[:, 2]

    result = {
        'df_matrix_corr': df_matrix_corr,
        'matrix_corr1': matrix_corr1,
        'matrix_corr2': matrix_corr2,
        'num_pair_dict': num_pair_dict
    }

    return result
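
The inner loop above reduces to two co-occurrence estimates per answer pair: P(A1,A2) = N(A1,A2) / N and P(A2|A1) = N(A1,A2) / N(A1). A toy recomputation of both quantities on made-up data:

import numpy as np

# column 0: answers to question id1, column 1: answers to question id2
arr = np.array([[1, 8], [1, 8], [1, 9], [2, 8]])
num = arr.shape[0]                                                     # N = 4
num_ans1 = np.count_nonzero(arr[:, 0] == 1)                            # N(A1) = 3
num_ans1_ans2 = np.count_nonzero((arr[:, 0] == 1) & (arr[:, 1] == 8))  # N(A1,A2) = 2
print(num_ans1_ans2 / num)       # P(A1,A2)  = 0.5
print(num_ans1_ans2 / num_ans1)  # P(A2|A1) = 0.666...
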
Example #13
def pred_and_save(tracklet_pred_dir, dataset, frame_offset=0, log_tag=None, weights_tag=None):
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag, weights_tag=weights_tag)

    queue = deque(maxlen=1)

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    # dataset.size - 1 because dataset.get_shape() consumes one frame, so the
    # first frame is omitted from prediction; fix this when there is more time.
    for i in range(dataset.size - 1 if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, frame_id = dataset.load()

        # handling multiple bags.
        current_bag = frame_id.split('/')[1]
        current_frame_num = int(frame_id.split('/')[2])
        if i == 0:
            prev_tag_bag = None
        else:
            prev_tag_bag = queue[0]
        if current_bag != prev_tag_bag:
            # print('current bag name: ', current_bag, '. previous bag name ', prev_tag_bag)
            if i != 0:
                tracklet.write_tracklet()
            tracklet = Tracklet_saver(tracklet_pred_dir, current_bag, exist_ok=True)
            # print('frame counter reset to 0. ')
        queue.append(current_bag)

        # frame_num = i - frame_offset
        # if frame_num < 0:
        #     continue

        # detection
        boxes3d, probs = predict(top, front, rgb)
        # predict.dump_log(log_subdir=os.path.join('tracking',log_tag), n_frame=i, frame_tag=frame_id)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        if len(boxes3d) != 0:
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # print(translation)
            # print(len(translation))
            # add to tracklets
            for j in range(len(translation)):
                # if 0 < translation[j, 1] < 8:
                # print('pose wrote. '
                tracklet.add_tracklet(current_frame_num, size[j], translation[j], rotation[j], probs[j], boxes3d[j])

        # print('frame_counter is here: ', current_frame_num, ' and i is here: ', i, 'frame id is here: ', frame_id)

    tracklet.write_tracklet()
    predict.save_weights(dir=os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
Example #14
def pred_and_save(tracklet_pred_dir,
                  dataset,
                  generate_video=False,
                  frame_offset=16,
                  log_tag=None,
                  weights_tag=None):
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir, 'pred')
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    gt_tracklet = Tracklet_saver(tracklet_pred_dir, 'gt')

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    print('dataset.size')
    print(dataset.size)
    length = []
    gt_length = []

    frame_num = 0
    for i in range(dataset.size if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, _ = dataset.load(size=1)

        frame_num = i - frame_offset
        print('frame_num')
        print(frame_num)
        if frame_num < 0:
            continue

        gt_boxes3d_tmp = np.load(
            '/home/mohsen/Desktop/MV3D/data/preprocessed/kitti/gt_boxes3d/object3d/test/%05d.npy'
            % i)

        # remove gt boxes whose projected height is less than 40 px
        gt_boxes3d_list = []
        for gt_box3d_tmp in gt_boxes3d_tmp:
            # if gt_box3d_tmp[0,0]>0:
            gt_box3d_tmp_list = []
            gt_box3d_tmp_list.append(gt_box3d_tmp)
            gt_project = box3d.box3d_to_rgb_box(gt_box3d_tmp_list)

            if abs(gt_project[0][0, 1] - gt_project[0][4, 1]) >= 40:
                gt_box3d = gt_box3d_tmp
                gt_boxes3d_list.append(gt_box3d)
        gt_boxes3d = np.array(gt_boxes3d_list)
        # gt_boxes3d = gt_boxes3d_tmp

        #####################################
        boxes3d_tmp, probs = predict(top, front, rgb)

        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        if len(gt_boxes3d) != 0:

            gt_length.append(len(gt_boxes3d))

            gt_translation, gt_size, gt_rotation = boxes3d_decompose(
                gt_boxes3d[:, :, :])

            # todo: remove it after gtbox is ok
            gt_size[:, 1:3] = gt_size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(gt_translation)):
                gt_tracklet.add_tracklet(frame_num, gt_size[j],
                                         gt_translation[j], gt_rotation[j])

        # remove predicted boxes whose projected height is less than 40 px
        boxes3d_list = []
        for box3d_tmp in boxes3d_tmp:
            # if box3d_tmp[0, 0] > 0:

            box3d_tmp_list = []
            box3d_tmp_list.append(box3d_tmp)
            project = box3d.box3d_to_rgb_box(box3d_tmp_list)

            if abs(project[0][0, 1] - project[0][4, 1]) >= 40:
                print(project[0][0, 1] - project[0][4, 1])
                pred_box3d = box3d_tmp
                boxes3d_list.append(pred_box3d)
        boxes3d = np.array(boxes3d_list)
        # boxes3d = boxes3d_tmp

        #####################################
        print('sizes')
        print(np.size(boxes3d))
        print(gt_boxes3d)
        print(np.size(gt_boxes3d))

        if len(boxes3d) != 0:
            length.append(len(boxes3d))

            top_image = draw_box3d_on_top(top_image,
                                          boxes3d[:, :, :],
                                          color=(80, 80, 0),
                                          thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image,
                                             boxes3d[:, :, :],
                                             color=(0, 0, 80),
                                             thickness=3)

            if len(gt_boxes3d) != 0:
                rgb_image = draw_box3d_on_camera(rgb_image,
                                                 gt_boxes3d[:, :, :],
                                                 color=(0, 80, 0),
                                                 thickness=3)

            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])

            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                tracklet.add_tracklet(frame_num, size[j], translation[j],
                                      rotation[j])
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # close the video writer once, after the loop; closing it inside the loop
    # would invalidate it before the next frame is written
    if generate_video:
        vid_in.close()

    print(length)
    print(sum(length))
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    print(gt_length)
    print(sum(gt_length))
    gt_tracklet.write_tracklet()

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path, gt_tracklet.path
Example #15
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            validation_step = 40
            ckpt_save_step = 200

            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write(
                'iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n'
            )
            self.log_msg.write(
                '-------------------------------------------------------------------------------------\n'
            )

            for iter in range(max_iter):

                is_validation = False
                summary_it = False
                summary_runmeta = False
                print_loss = False
                log_this_iter = False

                # set fit flag
                if iter % validation_step == 0:
                    summary_it, is_validation, print_loss = True, True, True  # summary validation loss
                if (iter + 1) % validation_step == 0:
                    summary_it, print_loss = True, True  # summary train loss
                if iter % 20 == 0: print_loss = True  #print train loss

                if 1 and iter % 300 == 0:
                    summary_it, summary_runmeta = True, True

                if iter % self.iter_debug == 0 or (iter +
                                                   1) % self.iter_debug == 0:
                    log_this_iter = True
                    print('Summary log image')
                    if iter % self.iter_debug == 0: is_validation = False
                    else: is_validation = True

                data_set = self.validation_set if is_validation else self.train_set
                self.default_summary_writer = self.val_summary_writer if is_validation else self.train_summary_writer

                step_name = 'validation' if is_validation else 'training'

                # load dataset
                self.batch_rgb_images, self.batch_top_view, self.batch_front_view, \
                self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id = \
                    data_set.load()

                # fit_iterate log init
                if log_this_iter:
                    self.time_str = strftime("%Y_%m_%d_%H_%M", localtime())
                    self.frame_info = data_set.get_frame_info()
                    self.log_subdir = step_name + '/' + self.time_str
                    top_image = data.draw_top_image(self.batch_top_view[0])
                    self.top_image = self.top_image_padding(top_image)

                # fit
                t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss= \
                    self.fit_iteration(self.batch_rgb_images, self.batch_top_view, self.batch_front_view,
                                       self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id,
                                       is_validation =is_validation, summary_it=summary_it,
                                       summary_runmeta=summary_runmeta, log=log_this_iter)

                if print_loss:
                    self.log_msg.write('%10s: |  %5d  %0.5f   %0.5f   |   %0.5f   %0.5f \n' % \
                                       (step_name, self.n_global_step, t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss))

                if iter % ckpt_save_step == 0:
                    self.save_weights(self.train_target)

                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), ckpt_save_step))
                self.gc()
                self.n_global_step += 1

            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))
            self.save_progress()
Example #16
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            validation_step = 40
            ckpt_save_step = 200


            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write('iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n')
            self.log_msg.write('-------------------------------------------------------------------------------------\n')


            for iter in range(max_iter):


                is_validation = False
                summary_it = False
                summary_runmeta = False
                print_loss = False
                log_this_iter = False

                # set fit flag
                if iter % validation_step == 0:
                    summary_it, is_validation, print_loss = True, True, True  # summary validation loss
                if (iter + 1) % validation_step == 0:
                    summary_it, print_loss = True, True  # summary train loss
                if iter % 20 == 0: print_loss = True  # print train loss

                if 1 and iter % 300 == 0:
                    summary_it, summary_runmeta = True, True

                if iter % self.iter_debug == 0 or (iter + 1) % self.iter_debug == 0:
                    log_this_iter = True
                    print('Summary log image')
                    if iter % self.iter_debug == 0: is_validation = False
                    else: is_validation = True

                data_set = self.validation_set if is_validation else self.train_set
                self.default_summary_writer = self.val_summary_writer if is_validation else self.train_summary_writer

                step_name = 'validation' if is_validation else 'training'

                # load dataset
                if config.cfg.USE_RNN:
                    self.batch_rgb_images, self.batch_top_view, self.batch_front_view, \
                    self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id = \
                        data_set.load(batch_size, shuffled=False)
                else:
                    self.batch_rgb_images, self.batch_top_view, self.batch_front_view, \
                    self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id = \
                        data_set.load(batch_size, shuffled=True)

                # fit_iterate log init
                if log_this_iter:
                    self.time_str = strftime("%Y_%m_%d_%H_%M", localtime())
                    self.frame_info = data_set.get_frame_info(self.frame_id)[0]
                    self.log_subdir = step_name + '/' + self.time_str
                    top_image = data.draw_top_image(self.batch_top_view[0])
                    self.top_image = self.top_image_padding(top_image)


                # fit
                t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss= \
                    self.fit_iteration(self.batch_rgb_images, self.batch_top_view, self.batch_front_view,
                                       self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id,
                                       is_validation =is_validation, summary_it=summary_it,
                                       summary_runmeta=summary_runmeta, log=log_this_iter)

                if print_loss:
                    self.log_msg.write('%10s: |  %5d  %0.5f   %0.5f   |   %0.5f   %0.5f \n' % \
                                       (step_name, self.n_global_step, t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss))

                if iter % ckpt_save_step == 0:
                    self.save_weights(self.train_target)


                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), ckpt_save_step))
                self.gc()
                self.n_global_step += 1


            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))
            self.save_progress()
Example #17
        help='The directory where the model will be stored.')
    self.add_argument(
        '--train_epochs',
        type=int,
        default=40,
        help='Number of epochs to train.')
    self.add_argument(
        '--data_format',
        type=str,
        default=None,
        choices=['channels_first', 'channels_last'],
        help='A flag to override the data format used in the model. '
        'channels_first provides a performance boost on GPU but is not always '
        'compatible with CPU. If left unspecified, the data format will be '
        'chosen automatically based on whether TensorFlow was built for CPU or '
        'GPU.')
    self.add_argument(
        '--export_dir',
        type=str,
        help='The directory where the exported SavedModel will be stored.')


if __name__ == '__main__':
  parser = MNISTArgParser()
  tf.logging.set_verbosity(tf.logging.INFO)
  FLAGS, unparsed = parser.parse_known_args()

  timer('TRAIN')
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
  timer()
Example #18
        acc = tf.reduce_mean(tf.cast(correct_prediction,
                                     tf.float32))  # top-1 - mean value
        acc_top6 = tf.nn.in_top_k(logits, tf.argmax(y, 1),
                                  6)  # list values for batch.

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            if arguments.restore_checkpoint is not None:
                tf.train.Saver().restore(sess, './{}/{}'.\
                 format(dir_for_checkpoints, arguments.restore_checkpoint))

            for epoch in range(num_epochs):
                print('\nEPOCH {}/{}'.format(epoch, num_epochs))

                timer('train, epoch {0}'.format(epoch))
                train_loss_list, train_acc_list, train_top6_list = [], [], []
                train_top6_list2 = []

                for i in range(train_steps_per_epoch):

                    try:
                        features, labels = sess.run(next_element_train)
                        #print(i, labels[0])
                        sess.run(train_op, feed_dict={x: features, y: labels})

                        #train_acc, train_acc_top6 = sess.run([acc, acc_top6], feed_dict={x: features, y: labels})
                        train_outputs, train_loss, train_acc, train_top6 \
                         = sess.run([output, loss, acc, acc_top6], feed_dict={x: features, y: labels})

                        acc1 = accuracy_top1(train_outputs, labels)
Example #19
def pred_and_save(tracklet_pred_dir,
                  dataset,
                  frame_offset=0,
                  log_tag=None,
                  weights_tag=None):
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    queue = deque(maxlen=1)

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    # dataset.size - 1 because dataset.get_shape() consumes one frame, so the
    # first frame is omitted from prediction; fix this when there is more time.
    for i in range(dataset.size - 1 if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, frame_id = dataset.load()

        # handling multiple bags.
        current_bag = frame_id.split('/')[1]
        current_frame_num = int(frame_id.split('/')[2])
        if i == 0:
            prev_tag_bag = None
        else:
            prev_tag_bag = queue[0]
        if current_bag != prev_tag_bag:
            # print('current bag name: ', current_bag, '. previous bag name ', prev_tag_bag)
            if i != 0:
                tracklet.write_tracklet()
            tracklet = Tracklet_saver(tracklet_pred_dir,
                                      current_bag,
                                      exist_ok=True)
            # print('frame counter reset to 0. ')
        queue.append(current_bag)

        # frame_num = i - frame_offset
        # if frame_num < 0:
        #     continue

        # detection
        boxes3d, probs = predict(top, front, rgb)
        # predict.dump_log(log_subdir=os.path.join('tracking',log_tag), n_frame=i, frame_tag=frame_id)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        if len(boxes3d) != 0:
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # print(translation)
            # print(len(translation))
            # add to tracklets
            for j in range(len(translation)):
                # if 0 < translation[j, 1] < 8:
                # print('pose wrote. '
                tracklet.add_tracklet(current_frame_num, size[j],
                                      translation[j], rotation[j], probs[j],
                                      boxes3d[j])

        # print('frame_counter is here: ', current_frame_num, ' and i is here: ', i, 'frame id is here: ', frame_id)

    tracklet.write_tracklet()
    predict.save_weights(dir=os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
Example #20
        acc_top6 = tf.nn.in_top_k(logits, tf.argmax(y, 1),
                                  6)  # list values for batch.

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            if arguments.restore_checkpoint is not None:

                tf.train.Saver(variables_to_restore).restore(sess, './{}/{}'.\
                 format(dir_for_checkpoints, arguments.restore_checkpoint))

            for epoch in range(num_epochs):
                print('\nEPOCH {}/{} ({})'.format(epoch, num_epochs,
                                                  net_model_name))

                timer('train, epoch {0}'.format(epoch))
                train_loss_list, train_acc_list, train_top6_list = [], [], []

                for i in range(train_steps_per_epoch):

                    try:
                        features, labels = sess.run(next_element_train)
                        #print(i, labels[0])

                        sess.run(train_op, feed_dict={x: features, y: labels})

                        #train_acc, train_acc_top6 = sess.run([acc, acc_top6], feed_dict={x: features, y: labels})
                        train_loss, train_acc, train_top6 = sess.run(
                            [loss, acc, acc_top6],
                            feed_dict={
                                x: features,
Example #21
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            validation_step = 40
            ckpt_save_step = 200


            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write('iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n')
            self.log_msg.write('-------------------------------------------------------------------------------------\n')


            for iter in range(max_iter):


                is_validation = False
                summary_it = False
                summary_runmeta = False
                print_loss = False
                log_this_iter = False

                # set fit flag
                if iter % validation_step == 0:
                    summary_it, is_validation, print_loss = True, True, True  # summary validation loss
                if (iter + 1) % validation_step == 0:
                    summary_it, print_loss = True, True  # summary train loss
                if iter % 5 == 0: print_loss = True  # print train loss

                if 1 and iter % 300 == 3:
                    summary_it, summary_runmeta = True, True

                if iter % self.iter_debug == 0 or (iter + 1) % self.iter_debug == 0:
                    log_this_iter = True
                    print('summary log image')
                    if iter % self.iter_debug == 0: is_validation = False
                    else: is_validation = True

                data_set = self.validation_set if is_validation else self.train_set
                self.default_summary_writer = self.val_summary_writer if is_validation else self.train_summary_writer

                step_name = 'validation' if is_validation else 'training'

                # load dataset
                self.batch_rgb_images, self.batch_top_view, self.batch_front_view, \
                self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id = \
                    data_set.load(batch_size, shuffled=True)
                #
                # if self.batch_data_is_invalid(self.batch_gt_boxes3d[0]):
                #     continue

                # keep all gt labels and gt boxes inside the selected range; discard gt outside it.
                is_gt_inside_range, batch_gt_labels_in_range, batch_gt_boxes3d_in_range = \
                    self.keep_gt_inside_range(self.batch_gt_labels[0], self.batch_gt_boxes3d[0])

                if not is_gt_inside_range: continue

                # todo: currently supports only batch_size == 1
                self.batch_gt_labels = np.zeros((1, batch_gt_labels_in_range.shape[0]), dtype=np.int32)
                self.batch_gt_boxes3d = np.zeros((1, batch_gt_labels_in_range.shape[0], 8, 3), dtype=np.float32)
                self.batch_gt_labels[0] = batch_gt_labels_in_range
                self.batch_gt_boxes3d[0] = batch_gt_boxes3d_in_range


                # fit_iterate log init
                if log_this_iter:
                    self.time_str = strftime("%Y_%m_%d_%H_%M", localtime())
                    self.frame_info = data_set.get_frame_info(self.frame_id)[0]
                    self.log_subdir = step_name + '/' + self.time_str
                    top_image = data.draw_top_image(self.batch_top_view[0])
                    self.top_image = self.top_image_padding(top_image)


                # fit
                t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss= \
                    self.fit_iteration(self.batch_rgb_images, self.batch_top_view, self.batch_front_view,
                                       self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id,
                                       is_validation =is_validation, summary_it=summary_it,
                                       summary_runmeta=summary_runmeta, log=log_this_iter)

                if print_loss:
                    self.log_msg.write('%10s: |  %5d  %0.5f   %0.5f   |   %0.5f   %0.5f \n' % \
                                       (step_name, self.n_global_step, t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss))

                if iter % ckpt_save_step == 0:
                    # saver.save(sess, pretrained_model_path)
                    print('save_weights')
                    self.save_weights(self.train_target)


                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), ckpt_save_step))
                self.gc()
                self.n_global_step += 1


            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))
            self.save_progress()
Example #22
    def __call__(self, max_iter=1000, train_set=None, validation_set=None):

        sess = self.sess
        net = self.net

        with sess.as_default():
            #for init model

            batch_size = 1

            validation_step = 40
            ckpt_save_step = 200

            if cfg.TRAINING_TIMER:
                time_it = timer()

            # start training here  #########################################################################################
            self.log_msg.write(
                'iter |  top_cls_loss   reg_loss   |  fuse_cls_loss  reg_loss  total |  \n'
            )
            self.log_msg.write(
                '-------------------------------------------------------------------------------------\n'
            )

            for iter in range(max_iter):

                is_validation = False
                summary_it = False
                summary_runmeta = False
                print_loss = False
                log_this_iter = False

                # set fit flag
                if iter % validation_step == 0:
                    summary_it, is_validation, print_loss = True, True, True  # summary validation loss
                if (iter + 1) % validation_step == 0:
                    summary_it, print_loss = True, True  # summary train loss
                if iter % 5 == 0: print_loss = True  #print train loss

                if 1 and iter % 300 == 3:
                    summary_it, summary_runmeta = True, True

                if iter % self.iter_debug == 0 or (iter +
                                                   1) % self.iter_debug == 0:
                    log_this_iter = True
                    print('summary log image')
                    if iter % self.iter_debug == 0: is_validation = False
                    else: is_validation = True

                data_set = self.validation_set if is_validation else self.train_set
                self.default_summary_writer = self.val_summary_writer if is_validation else self.train_summary_writer

                step_name = 'validation' if is_validation else 'training'

                # load dataset
                self.batch_rgb_images, self.batch_top_view, self.batch_front_view, \
                self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id = \
                    data_set.load(batch_size, shuffled=True)
                #
                # if self.batch_data_is_invalid(self.batch_gt_boxes3d[0]):
                #     continue

                # keep all gt labels and gt boxes inside the selected range; discard gt outside it.
                is_gt_inside_range, batch_gt_labels_in_range, batch_gt_boxes3d_in_range = \
                    self.keep_gt_inside_range(self.batch_gt_labels[0], self.batch_gt_boxes3d[0])

                if not is_gt_inside_range: continue

                # todo: currently supports only batch_size == 1
                self.batch_gt_labels = np.zeros(
                    (1, batch_gt_labels_in_range.shape[0]), dtype=np.int32)
                self.batch_gt_boxes3d = np.zeros(
                    (1, batch_gt_labels_in_range.shape[0], 8, 3),
                    dtype=np.float32)
                self.batch_gt_labels[0] = batch_gt_labels_in_range
                self.batch_gt_boxes3d[0] = batch_gt_boxes3d_in_range

                # fit_iterate log init
                if log_this_iter:
                    self.time_str = strftime("%Y_%m_%d_%H_%M", localtime())
                    self.frame_info = data_set.get_frame_info(self.frame_id)[0]
                    self.log_subdir = step_name + '/' + self.time_str
                    top_image = data.draw_top_image(self.batch_top_view[0])
                    self.top_image = self.top_image_padding(top_image)

                # fit
                t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss= \
                    self.fit_iteration(self.batch_rgb_images, self.batch_top_view, self.batch_front_view,
                                       self.batch_gt_labels, self.batch_gt_boxes3d, self.frame_id,
                                       is_validation =is_validation, summary_it=summary_it,
                                       summary_runmeta=summary_runmeta, log=log_this_iter)

                if print_loss:
                    self.log_msg.write('%10s: |  %5d  %0.5f   %0.5f   |   %0.5f   %0.5f \n' % \
                                       (step_name, self.n_global_step, t_cls_loss, t_reg_loss, f_cls_loss, f_reg_loss))

                if iter % ckpt_save_step == 0:
                    # saver.save(sess, pretrained_model_path)
                    print('save_weights')
                    self.save_weights(self.train_target)

                    if cfg.TRAINING_TIMER:
                        self.log_msg.write('It takes %0.2f secs to train %d iterations. \n' % \
                                           (time_it.time_diff_per_n_loops(), ckpt_save_step))
                self.gc()
                self.n_global_step += 1

            if cfg.TRAINING_TIMER:
                self.log_msg.write('It takes %0.2f secs to train the dataset. \n' % \
                                   (time_it.total_time()))
            self.save_progress()
Example #23
def pred_and_save(tracklet_pred_dir,
                  dataset,
                  generate_video=False,
                  frame_offset=16,
                  log_tag=None,
                  weights_tag=None):
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, _ = dataset.load()

        frame_num = i - frame_offset
        if frame_num < 0:
            continue

        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image,
                                          boxes3d[:, :, :],
                                          color=(80, 80, 0),
                                          thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image,
                                             boxes3d[:, :, :],
                                             color=(0, 0, 80),
                                             thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j],
                                          rotation[j])
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # close the video writer once, after the loop; closing it inside the loop
    # would invalidate it before the next frame is written
    if generate_video:
        vid_in.close()

    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
Example #24
def correlation_calculation(tests=None, min_number_results=5, one=False):

    timer('load_data')

    np.set_printoptions(precision=2)
    pd.options.display.float_format = '{:,.2f}'.format

    if tests is None:  # if the list of tests is not specified
        tests = database_connect.load_test_list()

    att_dict = database_connect.load_attributes(tests=tests)
    print('num scales:', len(att_dict))

    timer('get_question_values')
    df, df_ques = get_question_values(tests)
    #return df, df_ques

    # !!! here we call get_dataframes a second time, to get dfcore
    _, dfcore = database_connect.get_dataframes(tests=tests)

    if one:
        table_name = 'test_qa_correlation'
    else:
        table_name = 'matrix_corr_qa'

    # Clean table
    if not NO_WRITING_DB:
        database_connect.clean_table(table_name, truncate=True)
        logging.info('Table {0} was cleaned.'.format(table_name))

    timer('compare_questions')

    if one:

        for test in tests:
            result = compare_questions(df_ques,
                                       dfcore,
                                       min_number_results,
                                       tests=[test])
            df = result['df_matrix_corr']
            df['test_id'] = df['test_id_1']
            df.drop(['test_id_1'], axis=1, inplace=True)
            df.drop(['test_id_2'], axis=1, inplace=True)
            df.drop(['coeff'], axis=1, inplace=True)
            save_dataframe_to_db(df, table_name)

    else:

        if len(tests) == 1:

            result = compare_questions(df_ques,
                                       dfcore,
                                       min_number_results,
                                       tests=tests)
            df = result['df_matrix_corr']
            #return df
            save_dataframe_to_db(df, table_name)

        else:

            for test1 in tests:
                for test2 in tests:
                    if test1 > test2: continue

                    result = compare_questions(df_ques,
                                               dfcore,
                                               min_number_results,
                                               tests=[test1, test2])
                    df = result['df_matrix_corr']
                    save_dataframe_to_db(df, table_name)
Example #25
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False,
                  frame_offset=16, log_tag=None, weights_tag=None):
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag, weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size):

        rgb, top, front, _, _, _ = dataset.load(1)

        frame_num = i - frame_offset
        if frame_num < 0:
            continue

        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                   (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]


        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :], color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j], rotation[j])
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(rgb_image, (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i), new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # close the video writer once, after the loop; closing it inside the loop
    # would invalidate it before the next frame is written
    if generate_video:
        vid_in.close()

    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path