Example #1
import argparse

import inference  # project module providing evaluate()/predict()
import training   # project module providing train()


def main():
    """ Reads command line arguments and starts either training, evaluation or inference. """

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        default='',
        help='JSON-formatted file with configuration parameters')
    parser.add_argument('--mode',
                        default='',
                        help='The operational mode - train|evaluate|predict')
    parser.add_argument('--input',
                        default='',
                        help='The path to a PLY file to run through the model')
    parser.add_argument('--device',
                        default='gpu',
                        help='The device to use - cpu|gpu')
    parser.add_argument('--colors',
                        default='',
                        help='A file containing colors')
    cla = parser.parse_args()

    if cla.config == '':
        print('Error: --config flag is required. Enter the path to a configuration file.')
        return

    if cla.mode == 'evaluate':
        inference.evaluate(cla.config, cla.device)
    elif cla.mode == 'predict':
        inference.predict(cla.config, cla.input, cla.device, cla.colors)
    elif cla.mode == 'train':
        training.train(cla.config)
    else:
        print('Error: invalid operational mode - options: train, evaluate or predict')
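For context, this entry point is a simple command-line driver: a typical invocation would look something like `python main.py --config config.json --mode train` or `python main.py --config config.json --mode predict --input scene.ply` (the script name and file paths here are illustrative assumptions, not taken from the example).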
Example #2
import os

from inference import evaluate  # PointNet evaluation helper used throughout these examples


def on_message(client, userdata, msg):
    print("topic: " + msg.topic)
    print("Payload Data: " + str(msg.payload))
    # msg.payload is bytes, so decode it before comparing to the string 'bottle'
    if msg.payload.decode() == 'bottle':
        os.chdir("./../pointnet")
        location, std = evaluate(label_to_detect=12,
                                 x_offset=0.35,
                                 y_offset=0.137,
                                 z_offset=0.1)
        print(location, std)
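A minimal sketch of how this callback might be registered with a paho-mqtt client (paho-mqtt 1.x style setup; the broker address and topic name are assumptions for illustration):

import paho.mqtt.client as mqtt

client = mqtt.Client()
client.on_message = on_message          # register the callback defined above
client.connect("localhost", 1883)       # assumed broker host/port
client.subscribe("detection/commands")  # assumed topic carrying object names such as 'bottle'
client.loop_forever()                   # block and dispatch incoming messages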
Example #3
import datetime
import pickle

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

import inference  # project module providing evaluate()
import utils      # project module providing prepare_sequence()
from model import LSTMTagger  # import path for LSTMTagger is an assumption


def train(target_dir,
          embedding_dim,
          hidden_dim,
          glove_file):
    torch.manual_seed(1)
    train_word_to_ix, train_tag_to_ix, train_sents_idx, train_labels_idx = pickle.load(
                                                           open(target_dir + "CoNLL_train.pkl", "rb"))
    test_word_to_ix, test_tag_to_ix, test_sents_idx, test_labels_idx = pickle.load(
                                                           open(target_dir + "CoNLL_test.pkl", "rb"))
    model = LSTMTagger(embedding_dim,
                       hidden_dim,
                       len(train_word_to_ix),
                       len(train_tag_to_ix),
                       target_dir,
                       glove_file)
    criterion = nn.NLLLoss()
    optimizer = optim.RMSprop(model.parameters())

    EPOCHS = 2
    for epoch in range(EPOCHS):
        epoch_loss = 0  # accumulated loss over the epoch
        for i, (sentence, tags) in tqdm(enumerate(zip(train_sents_idx, train_labels_idx))):
            model.zero_grad()
            model.hidden = model.init_hidden()
            # Convert the sentence to a tensor of word indices
            sentence_in = utils.prepare_sequence(sentence)
            # Convert the tags to a tensor of tag indices
            targets = utils.prepare_sequence(tags)
            tag_scores = model(sentence_in)
            loss = criterion(tag_scores, targets)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        f1_score_train_sents_avg = inference.evaluate(model,
                                                      train_sents_idx[:len(test_sents_idx)],
                                                      train_labels_idx[:len(test_sents_idx)])
        f1_score_test_sents_avg = inference.evaluate(model,
                                                     test_sents_idx,
                                                     test_labels_idx)
        print("[{}] EPOCH {} - LOSS: {:.8f} TRAIN_DATA_F1_SCORE: {} TEST_DATA_F1_SCORE: {}".
                format(datetime.datetime.today(), epoch + 1, loss,
                       f1_score_train_sents_avg, f1_score_test_sents_avg))
Example #4

import time

import tensorflow as tf
from flask import request, jsonify


def AutoImageCaption():
    # evaluate() is assumed to be defined elsewhere in this captioning module
    image_url = request.args.get('image')
    print(image_url)
    image_extension = image_url[-4:]
    image_path = tf.keras.utils.get_file(str(int(time.time())) +
                                         image_extension,
                                         origin=image_url)
    result, attention_plot = evaluate(image_path)
    data = {'Prediction Caption:': ' '.join(result)}

    return jsonify(data)
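A minimal sketch of how this handler could be exposed as a Flask route (the app setup and the /caption route path are assumptions, not part of the example):

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/caption', 'AutoImageCaption', AutoImageCaption)  # assumed route path

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)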
Example #5
import tensorflow as tf  # the example uses the TF 1.x graph/session API

import input_data  # project module for loading images and ground-truth planes
import inference   # project module providing evaluate()
from config import Config  # import path for Config is an assumption


def main():
    config = Config()

    # Load images and ground truth planes
    data = input_data.read_data_sets(config.data_dir, config.label_dir,
                                     config.train_list_file,
                                     config.test_list_file,
                                     config.landmark_count, config.plane_name)

    print("Start inference...")
    tf.reset_default_graph()
    sess = tf.InteractiveSession()

    # Load trained model
    g = tf.get_default_graph()
    saver = tf.train.import_meta_graph(
        tf.train.latest_checkpoint(config.model_dir) + '.meta')
    saver.restore(sess, tf.train.latest_checkpoint(config.model_dir))
    # translation classification probability
    action_prob_tran = g.get_collection('action_prob_tran')[0]
    # translation regression (displacement vector)
    ytr = g.get_collection('ytr')[0]
    # rotation classification probability
    action_prob_rot = g.get_collection('action_prob_rot')[0]
    # rotation regression (quaternions)
    yrr_norm = g.get_collection('yrr_norm')[0]
    x = g.get_collection('x')[0]
    keep_prob = g.get_collection('keep_prob')[0]

    # Evaluation on test-set
    print("Evaluation on test set:")
    inference.evaluate(data.test, config, 'test', sess, x, action_prob_tran,
                       ytr, action_prob_rot, yrr_norm, keep_prob)
    # Evaluation on train-set
    print("Evaluation on train set:")
    inference.evaluate(data.train, config, 'train', sess, x, action_prob_tran,
                       ytr, action_prob_rot, yrr_norm, keep_prob)
    sess.close()
Example #6
import rospy

# evaluate() and MoveGroupPythonIntefaceTutorial are assumed to be provided by the
# surrounding project (PointNet inference and the MoveIt commander tutorial class).


def main():
    try:
        print("============ Press `Enter` to recognize the object ...")
        input()
        location, std = evaluate(label_to_detect=12,
                                 x_offset=0,
                                 y_offset=0,
                                 z_offset=1.55)
        print(location)
        print(std)
        #location=[1.0653, -0.3668, 1.7145]
        print("============ Press `Enter` to set up the moveit_commander ...")
        input()
        tutorial = MoveGroupPythonIntefaceTutorial()

        print("============ Press `Enter` to add a box ...")
        input()
        tutorial.add_box(location)

        print("============ Press `Enter` to execute a movement using a pose goal ...")
        input()
        tutorial.go_to_pose_goal(location)

        print("============ Press `Enter` to attach the object to the robot ...")
        input()
        tutorial.attach_box()

        print("============ Press `Enter` to move back to initial position ...")
        input()
        tutorial.go_to_joint_state()

        print("============ Press `Enter` to detach the object ...")
        input()
        tutorial.detach_box()

        print("============ Complete!")
        input()
        tutorial.remove_box()
    except rospy.ROSInterruptException:
        return
    except KeyboardInterrupt:
        return
Example #7
# Method on a model class; it delegates to the module-level inference.evaluate.
def evaluate(self, *args, **kwargs):
    ''' Evaluate the performance of the model. See inference.evaluate
        for more details. '''
    from inference import evaluate
    return evaluate(self, *args, **kwargs)
Example #8
from inference import evaluate

# labels and their numbers
# 0  ceiling
# 1  floor
# 2  wall
# 3  beam
# 4  column
# 5  window
# 6  door
# 7  table
# 8  chair
# 9  sofa
# 10 bookcase
# 11 board
# 12 clutter

#----------------------------------------------------------------------------
# this is a usage example of PointNet recognition in Gazebo with a Kinect
#----------------------------------------------------------------------------

location, std = evaluate(label_to_detect=12,
                         x_offset=0.35,
                         y_offset=0.137,
                         z_offset=0.1)
# you will get the mean location and std of the objects belonging to the clutter class
# if you run this with tensorflow-cpu, the neural network inference will be
# slow, probably around 60 seconds.
print(location)
print(std)
Example #9
import subprocess
import sys

from inference import evaluate  # needed for the call below; missing from the original snippet
# labels and their numbers
# 0  ceiling
# 1  floor
# 2  wall
# 3  beam
# 4  column
# 5  window
# 6  door
# 7  table
# 8  chair
# 9  sofa
# 10 bookcase
# 11 board
# 12 clutter

#----------------------------------------------------------------------------
# this is a usage example of PointNet recognition in Gazebo with a Kinect
#----------------------------------------------------------------------------

location, std = evaluate(label_to_detect=12,
                         x_offset=0,
                         y_offset=0,
                         z_offset=0)
# you will get the mean location and std of the objects belonging to the clutter class
# if you run this with tensorflow-cpu, the neural network inference will be
# slow, probably around 60 seconds.
print(location)
print(std)