Пример #1
0
def hello_world(args):
    """
    Runs Jina's Hello World.

    Usage:
        Use it via CLI :command:`jina hello-world`.

    Description:
        It downloads Fashion-MNIST dataset and :term:`Indexer<indexes>` 50,000 images.
        The index is stored into 4 *shards*. It randomly samples 128 unseen images as :term:`Queries<Searching>`
        Results are shown in a webpage.

    More options can be found in :command:`jina hello-world --help`

    :param args: Argparse object
    """

    # make sure the workdir exists before anything is downloaded into it
    Path(args.workdir).mkdir(parents=True, exist_ok=True)

    # url + local target path for each of the four dataset artifacts
    targets = {
        'index-labels': {
            'url': args.index_labels_url,
            'filename': os.path.join(args.workdir, 'index-labels'),
        },
        'query-labels': {
            'url': args.query_labels_url,
            'filename': os.path.join(args.workdir, 'query-labels'),
        },
        'index': {
            'url': args.index_data_url,
            'filename': os.path.join(args.workdir, 'index-original'),
        },
        'query': {
            'url': args.query_data_url,
            'filename': os.path.join(args.workdir, 'query-original'),
        },
    }

    # download the data
    # NOTE(review): this presumably also attaches a loaded 'data' array to each
    # target entry — targets['index']['data'] is read below; confirm in download_data
    download_data(targets, args.download_proxy)

    # reduce the network load by using `fp16`, or even `uint8`
    os.environ['JINA_ARRAY_QUANT'] = 'fp16'
    os.environ['HW_WORKDIR'] = args.workdir

    # now comes the real work
    # build the flow: encode (2 parallel replicas) -> index -> evaluate
    f = Flow().add(uses=MyEncoder,
                   parallel=2).add(uses=MyIndexer).add(uses=MyEvaluator)

    # run it!
    with f:
        # index all downloaded documents in request-sized batches
        f.index(
            index_generator(num_docs=targets['index']['data'].shape[0],
                            target=targets),
            request_size=args.request_size,
        )

        # wait for couple of seconds
        countdown(
            3,
            reason=colored(
                'behold! im going to switch to query mode',
                'cyan',
                attrs=['underline', 'bold', 'reverse'],
            ),
        )

        # query with ground truth so the evaluator can score the results
        f.post(
            '/eval',
            query_generator(num_docs=args.num_query,
                            target=targets,
                            with_groundtruth=True),
            shuffle=True,
            on_done=print_result,
            request_size=args.request_size,
            parameters={'top_k': args.top_k},
        )

        # write result to html
        write_html(os.path.join(args.workdir, 'demo.html'))
Пример #2
0
def hello_world(args):
    """
    Runs Jina's Hello World.

    Usage:
        Use it via CLI :command:`jina hello fashion`.

    Description:
        It downloads Fashion-MNIST dataset and :term:`Indexer<indexes>` 50,000 images.
        The index is stored into 4 *shards*. It randomly samples 128 unseen images as :term:`Queries<Searching>`
        Results are shown in a webpage.

    More options can be found in :command:`jina hello-world --help`

    :param args: Argparse object
    """

    # create the working directory (and parents) if it is not there yet
    Path(args.workdir).mkdir(parents=True, exist_ok=True)

    # (target name, source url, local file name) for every dataset artifact
    artifact_spec = [
        ('index-labels', args.index_labels_url, 'index-labels'),
        ('query-labels', args.query_labels_url, 'query-labels'),
        ('index', args.index_data_url, 'index-original'),
        ('query', args.query_data_url, 'query-original'),
    ]
    targets = {
        name: {'url': url, 'filename': os.path.join(args.workdir, fname)}
        for name, url, fname in artifact_spec
    }

    # download the data
    download_data(targets, args.download_proxy)

    # now comes the real work
    # assemble the flow: encoder (2 replicas) feeding the indexer
    flow = Flow()
    flow = flow.add(uses=MyEncoder, replicas=2)
    flow = flow.add(uses=MyIndexer, workspace=args.workdir)

    # run it!
    with flow:
        # index every downloaded document, showing a progress bar
        flow.index(
            index_generator(num_docs=targets['index']['data'].shape[0],
                            target=targets),
            show_progress=True,
        )

        # score search responses against the ground truth, then print them
        gt = get_groundtruths(targets)
        on_done_cb = partial(print_result, gt)
        on_done_cb.__name__ = 'evaluate_print_callback'
        flow.post(
            '/search',
            query_generator(num_docs=args.num_query, target=targets),
            shuffle=True,
            on_done=on_done_cb,
            parameters={'top_k': args.top_k},
            show_progress=True,
        )

        # write result to html
        write_html(os.path.join(args.workdir, 'demo.html'))
Пример #3
0
        if not os.path.exists(log_dir): os.mkdir(log_dir)
        FLAGS.model = model
        FLAGS.data_dict = data_dict
        FLAGS.model_path = model_path
        FLAGS.log_dir = log_dir
        FLAGS.pairs_file = pairs_file
        FLAGS.eval_poses = eval_poses
        set_p = store_params(FLAGS)
    return set_p


if __name__ == '__main__':
    # Populate the global FLAGS namespace via set_params; only proceed to
    # training when it reports success (returns a truthy value).
    if set_params(FLAGS.model, FLAGS.data_dict, FLAGS.model_path,
                  FLAGS.log_dir, FLAGS.pairs_file, FLAGS.eval_poses):
        MODEL = importlib.import_module(FLAGS.model)  # import network module
        helper.download_data(FLAGS.data_dict)  # NOTE(review): presumably fetches the dataset if absent — confirm in helper
        train()

    # Used the following code to test multiple datas and multiple logs.

    # model_paths2test = ['log_multi_catg/model250.ckpt']#,['log_multi_catg_noise/model300.ckpt']
    # # model_paths2test = ['log_car_multi_models/model200.ckpt']#, 'log_car_multi_models_noise/model350.ckpt']
    # data_dicts2test = ['train_data','unseen_data']#, 'car_data']#, 'unseen_data']
    # eval_poses2test = ['itr_net_test_data45.csv']

    # FLAGS.use_noise_data = False

    # for mpt in model_paths2test:
    # 	for ddt in data_dicts2test:
    # 		for ept in eval_poses2test:
    # 			log_dir = 'test_itr_'+str(mpt[4:len(mpt)-14])+'_'+str(ddt)+'_'+str(ept[len(ept)-6:len(ept)-4])
Пример #4
0
		eval_writer.add_summary(summary, step)			# Add all the summary to the tensorboard.

		# Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
		TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

		final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		print("Batch: {}, Loss: {}\r".format(fn, loss_val),end='')

		# Add loss for each batch.
		loss_sum += loss_val
	print('\n')
	log_string('Eval Mean loss: %f' % (loss_sum/num_batches))		# Store and display mean loss of epoch.

if __name__ == "__main__":
	# Dispatch on the CLI mode flag; only 'train' is supported here.
	if FLAGS.mode == 'no_mode':
		print('Specity a mode argument: train')
	elif FLAGS.mode == 'train':
		# download_data returns truthy on completion; banner is purely informational
		if helper.download_data(FLAGS.data_dict): print_('################### Data Downloading Finished ###################', color='g', style='bold')
		train()
		LOG_FOUT.close()  # flush/close the training log file opened at module level