コード例 #1
0
def process_dir(root_dir, subdir, template):
    """Recursively process a directory tree of OMR images.

    A template file found in the current directory overrides the one
    inherited from the parent. Leaf directories containing images are
    processed with the active template; other directories are recursed.

    Args:
        root_dir: Root of the scan tree.
        subdir: Path of the current directory relative to ``root_dir``.
        template: Template inherited from the parent (may be None).

    Returns:
        The result of ``process_files`` for a directory with images,
        None when images exist but no template is available, otherwise
        a list of results from the recursive calls.
    """
    curr_dir = os.path.join(root_dir, subdir)

    # Look for template in current dir
    template_file = os.path.join(curr_dir, config.TEMPLATE_FILE)
    if os.path.exists(template_file):
        template = Template(template_file)

    # look for images in current dir to process
    paths = config.Paths(os.path.join(args['output_dir'], subdir))
    exts = ('*.png', '*.jpg')
    omr_files = sorted(
        [f for ext in exts for f in glob(os.path.join(curr_dir, ext))])

    # Exclude marker image if exists
    if template and template.marker_path:
        omr_files = [f for f in omr_files if f != template.marker_path]
        print("\n\n\nMarker Found\n\n\n")

    subfolders = sorted([
        file for file in os.listdir(curr_dir)
        if os.path.isdir(os.path.join(curr_dir, file))
    ])
    if omr_files:
        # Bail out before dereferencing template attributes below:
        # the original read template.options (and template.path/marker)
        # first and crashed with AttributeError when template was None.
        if not template:
            print(f'Error: No template file when processing {curr_dir}.')
            print(
                f'  Place {config.TEMPLATE_FILE} in the directory or specify a template using -t.'
            )
            return

        args_local = args.copy()
        if "OverrideFlags" in template.options:
            args_local.update(template.options["OverrideFlags"])
        print(
            '\n------------------------------------------------------------------'
        )
        print(f'Processing directory {curr_dir} with settings- ')
        print("\tTotal images       : %d" % (len(omr_files)))
        print("\tCropping Enabled   : " + str(not args_local["noCropping"]))
        print("\tAuto Alignment     : " + str(args_local["autoAlign"]))
        # Parenthesize the conditional: without parentheses the ternary
        # applied to the whole concatenation, so the label was dropped
        # and a bare "N/A" was printed instead.
        print("\tUsing Template     : " +
              (str(template.path) if template else "N/A"))
        print("\tUsing Marker       : " +
              (str(template.marker_path)
               if template.marker is not None else "N/A"))
        print('')

        utils.setup_dirs(paths)
        output_set = setup_output(paths, template)
        return process_files(omr_files, template, args_local, output_set)
    elif len(subfolders) == 0:
        # the directory should have images or be non-leaf
        print(f'Note: No valid images or subfolders found in {curr_dir}')

    # recursively process subfolders, propagating the current template
    results_lists = []
    for folder in subfolders:
        results_lists.append(
            process_dir(root_dir, os.path.join(subdir, folder), template))
    return results_lists
コード例 #2
0
def main():
    """Seed the RNG, run the evaluation, and report wall-clock time."""
    paths = config.Paths()
    t0 = time.time()
    np.random.seed(0)
    evaluate(paths)
    # test(paths)
    print('Time elapsed: {}s'.format(time.time() - t0))
コード例 #3
0
ファイル: process_ply.py プロジェクト: zhouzhiqian/intentMARL
def main():
    """Process the 'city' scene PLY at scale 0.1 (units of 10 cm)."""
    paths = config.Paths()
    scene_name = 'city'
    unit_scale = 0.1

    # create_labels(paths)
    process_ply(paths, scene_name, unit_scale)  # each unit is 10 centimeters
コード例 #4
0
def test_valid():
    """Run the generalized Earley parser on a tiny two-segment example.

    Builds a synthetic classifier output of two equal-length segments,
    placing most probability mass on each segment's true label and the
    remainder on the adjacent label, then parses it with the grammar.
    """
    paths = config.Paths()
    grammar_file = os.path.join(paths.tmp_root, 'grammar', 'cad',
                                'stacking_objects.pcfg')

    # sentence = 'null reaching moving placing'
    # grammar = grammarutils.read_grammar(grammar_file, index=False)
    # test_earley(grammar, sentence.split())

    tokens = 'null reaching'.split()
    grammar = grammarutils.read_grammar(
        grammar_file,
        index=True,
        mapping=datasets.cad_metadata.subactivity_index)

    seg_length = 15
    correct_prob = 0.8
    index_of = datasets.cad_metadata.subactivity_index

    # Near-zero floor everywhere; each segment's rows then get the true
    # label at correct_prob and the next label over at 1 - correct_prob.
    classifier_output = np.full((seg_length * 2, 10), 1e-10)
    for seg, token in enumerate(tokens):
        rows = slice(seg * seg_length, (seg + 1) * seg_length)
        classifier_output[rows, index_of[token]] = correct_prob
        classifier_output[rows, index_of[token] + 1] = 1 - correct_prob

    test_generalized_earley(grammar, classifier_output)
コード例 #5
0
def main():
    """Run the grammar test and report elapsed time."""
    paths = config.Paths()
    t_start = time.time()
    # induce_activity_grammar(paths)
    # read_induced_grammar(paths)
    test(paths)
    print('Time elapsed: {}'.format(time.time() - t_start))
コード例 #6
0
 def save_model(cls, iteration, performance):
     """Checkpoint the shared TF session unless running in test mode."""
     if cls.test:
         return
     paths = config.Paths()
     ckpt_name = 'model_{:07d}_{:03d}_{:03d}.ckpt'.format(
         iteration, performance[0], performance[1])
     ckpt_path = os.path.join(paths.tmp_root, 'rnn', cls.timestamp, ckpt_name)
     cls.saver.save(cls.session, ckpt_path)
コード例 #7
0
ファイル: path_utils.py プロジェクト: zhouzhiqian/intentMARL
def main():
    """Run the path-planning demo on the 'city' scene and time it."""
    paths = config.Paths()
    scene = 'city'
    height_threshold = 1

    t0 = time.time()
    # compute_all_cost_maps(paths, scene, height_threshold)
    # print('Cost maps computed. Time elapsed: {}s'.format(time.time() - start_time))
    path_planning_demo(paths, scene, height_threshold)
    print('Time elapsed: {}s'.format(time.time() - t0))
コード例 #8
0
 def save_model(cls, iteration, performance):
     """Persist theta as .npy plus a human-readable CSV dump."""
     paths = config.Paths()
     out_dir = os.path.join(paths.tmp_root, 'theta', cls.timestamp)
     np.save(os.path.join(out_dir, 'theta_{:07d}'.format(iteration)),
             cls.theta)
     csv_path = os.path.join(out_dir, 'theta_{:07d}.csv'.format(iteration))
     with open(csv_path, 'w') as f:
         # One '====' separated section per agent, then the performance
         # counters and the running average reward.
         for agent_theta in cls.theta:
             np.savetxt(f, agent_theta, fmt='%.5f', footer='====')
         np.savetxt(f, performance, fmt='%d')
         np.savetxt(f, [cls.r_bar], fmt='%.5f')
コード例 #9
0
def main():
    """Run the per-frame LSTM evaluation on the Huang path layout."""
    paths = config.Paths()
    paths.path_huang()
    # Earlier experiment entry points, kept for reference:
    # model_path = paths.metadata_root + 'models/flipped/layer_3_without_dropout_maxlen_200_epoch_200.h5'
    # model_path_per_frame = paths.metadata_root + 'models/flipped/layer_3_per_frame_without_dropout_epoch_1000.h5'
    # without_segmentation_sequence_test(paths.metadata_root + 'flipped/', model_path, 200)
    # without_segmentation_sequence_test_per_frame(paths.metadata_root + 'flipped/', model_path_per_frame)
    # without_segmentation_sequence_test_per_frame_vgg16(paths.metadata_root)
    # get_bottleneck_feature(paths.metadata_root)
    # without_segmentation_sequence_test_per_frame_sequential(paths.metadata_root)
    without_segmentation_sequence_test_per_frame_lstm(
        paths.data_root, paths.metadata_root)
コード例 #10
0
ファイル: test.py プロジェクト: tf369/GEP_PAMI
def visualize_grammar():
    """Render every 'wnp' .pcfg grammar to a PDF via graphviz `dot`."""
    paths = config.Paths()
    dataset_name = 'wnp'
    grammar_dir = os.path.join(paths.tmp_root, 'grammar', dataset_name)
    vis_dir = os.path.join(paths.tmp_root, 'visualize', 'grammar',
                           dataset_name)
    for pcfg in os.listdir(grammar_dir):
        if not pcfg.endswith('.pcfg'):
            continue
        grammar = grammarutils.read_grammar(
            os.path.join(grammar_dir, pcfg), insert=False)
        dot_filename = os.path.join(vis_dir, pcfg.replace('.pcfg', '.dot'))
        pdf_filename = os.path.join(vis_dir, pcfg.replace('.pcfg', '.pdf'))
        grammarutils.grammar_to_dot(grammar, dot_filename)
        os.system('dot -Tpdf {} -o {}'.format(dot_filename, pdf_filename))
コード例 #11
0
ファイル: test.py プロジェクト: tf369/GEP_PAMI
def test_time():
    """Benchmark the generalized Earley parser on random probabilities."""
    paths = config.Paths()
    start = time.time()
    np.random.seed(int(start))
    probs = np.random.rand(100000, 10)
    probs = probs / np.sum(probs, axis=1)[:, None]  # Normalize to probability
    grammar_dir = os.path.join(paths.tmp_root, 'grammar', 'cad')
    for pcfg in os.listdir(grammar_dir):
        if not pcfg.endswith('.pcfg'):
            continue
        grammar = grammarutils.read_grammar(
            os.path.join(grammar_dir, pcfg),
            index=True,
            mapping=datasets.cad_metadata.subactivity_index)
        test_generalized_earley(grammar, probs)
    print('Time elapsed: {}s'.format(time.time() - start))
コード例 #12
0
    def init_lstm(cls):
        """Build the shared LSTM Q-value graph and TF session for all agents.

        Constructs an LSTM over [time, batch, feature] state+goal inputs,
        stacks two ReLU fully-connected layers and a linear scalar head,
        and wires a manual semi-gradient update where dQ/dtheta is scaled
        by -delta before being applied with SGD.  In test mode, restores
        a fixed pre-trained checkpoint.
        """
        cls.max_goal_size = cls.static_goal_num + cls.max_people_tracked

        # NOTE(review): the factor 2 presumably encodes (x, y) pairs for the
        # other drones and the tracked people — confirm against callers.
        cls.state_size = 2 * (cls.total_drone_num + cls.max_people_tracked - 1)
        cls.input_size = cls.state_size + cls.max_goal_size
        # Shape is [time, batch, feature]; matches time_major=True below.
        cls.inputs = tf.placeholder(tf.float32, [None, None, cls.input_size],
                                    name='input')

        lstm = tf.contrib.rnn.LSTMCell(num_units=RNNConfig.h_size,
                                       state_is_tuple=True)
        # Batch dimension is axis 1 because inputs are time-major.
        batch_size = tf.shape(cls.inputs)[1]
        initial_state = lstm.zero_state(batch_size, tf.float32)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
            lstm, cls.inputs, initial_state=initial_state, time_major=True)
        # Two ReLU layers (128 -> 32) then a linear scalar Q per timestep.
        cls.q_values_tf = tf.contrib.layers.fully_connected(
            rnn_outputs, num_outputs=128, activation_fn=tf.nn.relu)
        cls.q_values_tf = tf.contrib.layers.fully_connected(
            cls.q_values_tf, num_outputs=32, activation_fn=tf.nn.relu)
        # cls.q_values_tf = tf.contrib.layers.fully_connected(cls.q_values_tf, num_outputs=1, activation_fn=tf.nn.tanh)
        cls.q_values_tf = tf.contrib.layers.fully_connected(cls.q_values_tf,
                                                            num_outputs=1,
                                                            activation_fn=None)

        # Semi-gradient TD-style update: negating the gradient and scaling
        # by delta makes apply_gradients perform
        # theta <- theta + alpha * delta * dQ/dtheta.
        cls.delta_tf = tf.placeholder(tf.float32, name='delta')
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=RNNConfig.alpha)
        grads_and_vars = optimizer.compute_gradients(cls.q_values_tf)
        grads_and_vars = [(-gv[0] * cls.delta_tf, gv[1])
                          for gv in grads_and_vars]
        cls.train = optimizer.apply_gradients(grads_and_vars)

        # # Direct approximation of reward
        # cls.r_tf = tf.placeholder(tf.float32, name='r')
        # loss = tf.square(cls.q_values_tf - cls.r_tf)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=RNNConfig.alpha)
        # grads_and_vars = optimizer.compute_gradients(loss)
        # cls.train = optimizer.apply_gradients(grads_and_vars)

        cls.session = tf.Session()
        cls.session.run(tf.global_variables_initializer())

        # file_writer = tf.summary.FileWriter('/home/siyuan/Downloads/test/log', cls.session.graph)
        cls.saver = tf.train.Saver()

        # Test mode restores a hard-coded checkpoint instead of training.
        if cls.test:
            cls.saver.restore(
                cls.session,
                os.path.join(config.Paths().tmp_root, 'rnn',
                             '2017-09-08 18:24:57',
                             'model_0190000_249_1016.ckpt'),
            )
コード例 #13
0
ファイル: test.py プロジェクト: tf369/GEP_PAMI
def test_grammar():
    """Parse every 'cad' corpus with its grammar via NLTK's Earley parser."""
    paths = config.Paths()
    grammar_dir = os.path.join(paths.tmp_root, 'grammar', 'cad')
    for pcfg in os.listdir(grammar_dir):
        if not pcfg.endswith('.pcfg'):
            continue
        grammar = grammarutils.read_grammar(
            os.path.join(grammar_dir, pcfg),
            index=True,
            mapping=datasets.cad_metadata.subactivity_index)
        corpus_file = os.path.join(paths.tmp_root, 'corpus', 'cad',
                                   pcfg.replace('pcfg', 'txt'))
        with open(corpus_file, 'r') as f:
            for line in f:
                # Map each token to its subactivity index (as a string).
                tokens = [
                    str(datasets.cad_metadata.subactivity_index[token])
                    for token in line.strip(' *#\n').split(' ')
                ]
                parser = nltk.EarleyChartParser(grammar, trace=0)
                chart = parser.chart_parse(tokens)
                print(chart.edges()[-1])
コード例 #14
0
def parse_arguments():
    """Build and parse the command-line arguments for the VCOCO dataset.

    Returns:
        argparse.Namespace with path, optimization, and i/o settings.
    """
    # Parser check
    def restricted_float(x, inter):
        """Parse x as float; require inter[0] <= x <= inter[1]."""
        x = float(x)
        if x < inter[0] or x > inter[1]:
            # Report the actual allowed interval (the original hard-coded
            # "[1e-5, 1e-4]" regardless of which range was violated).
            raise argparse.ArgumentTypeError(
                "%r not in range [%r, %r]" % (x, inter[0], inter[1]))
        return x

    paths = config.Paths()

    feature_type = 'resnet'

    # Path settings
    parser = argparse.ArgumentParser(description='VCOCO dataset')
    parser.add_argument('--project-root', default=paths.project_root, help='intermediate result path')
    parser.add_argument('--tmp-root', default=paths.tmp_root, help='intermediate result path')
    parser.add_argument('--data-root', default=paths.vcoco_data_root, help='data path')
    parser.add_argument('--log-root', default=os.path.join(paths.log_root, 'vcoco/parsing_{}'.format(feature_type)), help='log files path')
    parser.add_argument('--resume', default=os.path.join(paths.tmp_root, 'checkpoints/vcoco/parsing_{}'.format(feature_type)), help='path to latest checkpoint')
    parser.add_argument('--eval-root', default=os.path.join(paths.tmp_root, 'evaluation/vcoco/{}'.format(feature_type)), help='path to save evaluation file')
    parser.add_argument('--feature-type', default=feature_type, help='feature_type')
    parser.add_argument('--visualize', action='store_true', default=False, help='Visualize final results')
    parser.add_argument('--vis-top-k', type=int, default=1, metavar='N', help='Top k results to visualize')

    # Optimization Options
    # Help texts below state the real defaults (they previously disagreed
    # with the actual default= values).
    parser.add_argument('--batch-size', type=int, default=1, metavar='N',
                        help='Input batch size for training (default: 1)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='Enables CUDA training')
    parser.add_argument('--epochs', type=int, default=30, metavar='N',
                        help='Number of epochs to train (default: 30)')
    parser.add_argument('--start-epoch', type=int, default=0, metavar='N',
                        help='Index of epoch to start (default: 0)')
    parser.add_argument('--link-weight', type=float, default=2, metavar='N',
                        help='Loss weight of existing edges')
    parser.add_argument('--lr', type=lambda x: restricted_float(x, [1e-5, 1e-2]), default=1e-4, metavar='LR',
                        help='Initial learning rate [1e-5, 1e-2] (default: 1e-4)')
    parser.add_argument('--lr-decay', type=lambda x: restricted_float(x, [.01, 1]), default=0.8, metavar='LR-DECAY',
                        help='Learning rate decay factor [.01, 1] (default: 0.8)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')

    # i/o
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='How many batches to wait before logging training status')
    # Accelerating
    parser.add_argument('--prefetch', type=int, default=0, help='Pre-fetching threads.')

    return parser.parse_args()
コード例 #15
0
def process_dir(root_dir, subdir, template, kesme_islemi, onizleme):
    """Recursively process a directory tree of OMR images (Turkish UI).

    Args:
        root_dir: Root of the scan tree.
        subdir: Path of the current directory relative to ``root_dir``.
        template: Template inherited from the parent (may be None).
        kesme_islemi: Truthy to disable cropping (sets args['noCropping']).
        onizleme: Preview flag forwarded to ``process_files``.
    """
    curr_dir = os.path.join(root_dir, subdir)
    args['noCropping'] = bool(kesme_islemi)
    # A template in the current dir overrides the inherited one.
    template_file = os.path.join(curr_dir, config.TEMPLATE_FILE)
    if os.path.exists(template_file):
        template = Template(template_file)

    # look for images in current dir to process
    paths = config.Paths(os.path.join(args['output_dir'], subdir))
    exts = ('*.png', '*.jpg')
    omr_files = sorted(
        [f for ext in exts for f in glob(os.path.join(curr_dir, ext))])

    # Exclude marker image if exists
    if template and template.marker_path:
        omr_files = [f for f in omr_files if f != template.marker_path]

    subfolders = sorted([
        file for file in os.listdir(curr_dir)
        if os.path.isdir(os.path.join(curr_dir, file))
    ])
    if omr_files:
        # Guard before dereferencing template below: the original read
        # template.options first and crashed when template was None.
        if not template:
            print(f'Hata: işlenirken şablon bulunamadı {curr_dir}.')
            print(f'  Çalışma dizininde {config.TEMPLATE_FILE} dosyasını konumlandırın veya -t parametresi ile birlikte belirtin.')
            return

        args_local = args.copy()
        if "OverrideFlags" in template.options:
            args_local.update(template.options["OverrideFlags"])
        print('\n------------------------------------------------------------------')
        print(f'"{curr_dir}" dizini ayarları ile birlikte işleniyor- ')
        print("\tToplan resim        : %d" % (len(omr_files)))
        print("\tKırpma Aktif        : " + str(not args_local["noCropping"]))
        print("\tOtomatik Hizalama   : " + str(args_local["autoAlign"]))
        # Parenthesize the conditionals: without parentheses the ternary
        # applied to the whole concatenation and dropped the label text.
        print("\tKullanılan Şablon   : " +
              (str(template.path) if template else "N/A"))
        print("\tKullanılan İşaretçi : " +
              (str(template.marker_path)
               if template.marker is not None else "N/A"))
        print('')

        utils.setup_dirs(paths)
        output_set = setup_output(paths, template)
        process_files(omr_files, template, args_local, output_set, onizleme)
    elif len(subfolders) == 0:
        # the directory should have images or be non-leaf
        print(f'Bilgi: {curr_dir} klasöründe geçerli bir resim veya alt klasör bulunamadı.')

    # recursively process subfolders
    for folder in subfolders:
        process_dir(root_dir, os.path.join(subdir, folder), template,
                    kesme_islemi, onizleme)
コード例 #16
0
def main():
    """Run the evaluation with a fixed RNG seed and report elapsed time."""
    paths = config.Paths()
    start_time = time.time()
    seed = int(time.time())
    # Fixed: the original used a Python 2 print statement
    # (`print 'seed:', seed`), a SyntaxError under Python 3.
    print('seed:', seed)
    # NOTE(review): the RNG is seeded with 0, not `seed`, so the printed
    # seed is informational only — confirm this is intentional.
    np.random.seed(0)

    evaluate(paths)
    # supplementary.rgb_videos(paths)

    # plyutils.create_labels(paths)
    # cal_all_affordance(paths)
    # planning(paths, seed)

    print('Time elapsed: {}'.format(time.time() - start_time))
コード例 #17
0
def main():
    """Train and evaluate the affordance model on the Huang data layout."""
    paths = config.Paths()
    paths.path_huang()
    model_path = paths.metadata_root + 'models/affordance/'
    cache_path = paths.metadata_root + 'caches/affordance/'
    # makedirs(exist_ok=True) avoids the check-then-create race and also
    # creates missing parent directories (os.mkdir failed on both).
    os.makedirs(model_path, exist_ok=True)
    os.makedirs(cache_path, exist_ok=True)
    np.random.seed(1337)
    print('Loading data')
    object_feature, affordance_feature, distance_feature, orientation_feature = \
        load_affordance_data(paths.metadata_root)
    x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test = data_splitting(
        object_feature, affordance_feature, distance_feature,
        orientation_feature)
    print('Building model')
    model_dir = train(x_1_train, x_2_train, y_train, x_1_test, x_2_test,
                      y_test, model_path)
    result_output(x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test,
                  model_dir, cache_path)
コード例 #18
0
def main(opt):
    """Extract graph features for each UCF101 video and pickle them.

    Iterates the dataset once with batch size 1, squeezes the batch
    dimension off every tensor, and writes one .pkl per video under
    ``save_path/<category>/``.

    Args:
        opt: Options namespace providing at least batch_size and workers.
    """
    train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/ucf101Vid_all_lin.txt'
    #train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/test.txt'
    save_path = '/media/mcislab/new_ssd/wrq/data/UCF101/res18_rgbflow_same_similiarity/'
    paths = config.Paths()
    train_dataset = UCF101dataset_creat.dataset(
        train_datalist, paths.detect_root_ucf_mmdet, paths.img_root_ucf,
        paths.rgb_res18_ucf, paths.rgb_res18_ucf, opt)  #rgb_bninc_ucf
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=opt.workers,
                                  drop_last=False)
    print('start to read feature...')
    count = 0
    for i, (edge_features, node_features, adj_mat, graph_label, tem_features,
            video_info) in enumerate(train_dataloader, 0):
        videoinfo = video_info[0]
        category, video_name = videoinfo.split('/')
        # Drop the leading batch dimension (batch size is 1 per video).
        edge_features = edge_features.squeeze(0)
        node_features = node_features.squeeze(0)
        adj_mat = adj_mat.squeeze(0)
        graph_label = graph_label.squeeze(0)
        tem_features = tem_features.squeeze(0)
        feat_path = os.path.join(save_path, category)
        if not os.path.exists(feat_path):
            os.makedirs(feat_path)
        feat_dict = {
            'edge_features': edge_features,
            'node_features': node_features,
            'adj_mat': adj_mat,
            'graph_label': graph_label,
            'tem_features': tem_features,
            'video_info': video_info
        }
        out_file = feat_path + '/' + video_name[:-4] + '.pkl'
        print(out_file)
        # Context manager closes the handle even on error; the original
        # leaked the handle and shadowed the builtin name `file`.
        with open(out_file, 'wb') as f:
            pickle.dump(feat_dict, f)
        count += 1
    print('Has tranformed %d input features. Finished!' % count)
コード例 #19
0
    def __init__(self, root, input_imsize, transform, imageset):
        """Load HICO annotations for one image set and build sample lists."""
        self.imageset = 'train' if imageset == 'train' else 'test'

        # result for deformable convnet? img feature
        anno_file = os.path.join(root, '{}_annotations.mat'.format(imageset))
        gt_anno = sio.loadmat(anno_file)['gt_all']

        data = {'img_ids': [], 'bbxs': [], 'actions': []}
        # One sample per ground-truth bounding box, labeled with the action
        # that corresponds to its HOI class.
        for hoi_idx, hoi in enumerate(gt_anno):
            for img_idx, bboxes in enumerate(hoi):
                if bboxes.size == 0:
                    continue
                for row in bboxes:
                    data['img_ids'].append(img_idx)
                    data['bbxs'].append(row)
                    data['actions'].append(metadata.hoi_to_action[hoi_idx])
            print('finished for ' + str(hoi_idx))
        np.save("anno_tune.npy", data)

        self.hico_path = root
        self.imsize = input_imsize
        self.transform = transform

        list_file = os.path.join(config.Paths().project_root,
                                 '{}_all.txt'.format(imageset))
        with open(list_file) as f:
            image_list = [line.strip() for line in f.readlines()]

        self.img_files = [image_list[idx] for idx in data['img_ids']]
        self.bbxs = data['bbxs']
        self.actions = data['actions']
def main():
    """Produce qualitative results for the configured paths."""
    paths = config.Paths()
    # prepare_evaluation(paths)
    # evaluate(paths)
    qualitative_result(paths)
def main():
    """Plot sampled results for the configured paths."""
    paths = config.Paths()
    # plot_affordance(paths)
    plot_sample(paths)
コード例 #22
0
def main():
    """Load affordance data from the Huang metadata layout."""
    paths = config.Paths()
    paths.path_huang()
    load_affordance_data(paths.metadata_root)
コード例 #23
0
ファイル: vcoco.py プロジェクト: tengyu-liu/Part-GPNN
def parse_arguments():
    """Build and parse the command-line arguments for the VCOCO dataset.

    Returns:
        argparse.Namespace with path, model, optimization, i/o, debug,
        feature, and negative-rejection settings.
    """
    # Parser check
    def restricted_float(x, inter):
        """Parse x as float; require inter[0] <= x <= inter[1]."""
        x = float(x)
        if x < inter[0] or x > inter[1]:
            # Report the actual allowed interval (the original hard-coded
            # "[1e-5, 1e-4]" regardless of which range was violated).
            raise argparse.ArgumentTypeError(
                "%r not in range [%r, %r]" % (x, inter[0], inter[1]))
        return x

    paths = config.Paths()

    feature_type = 'resnet'

    # Path settings
    parser = argparse.ArgumentParser(description='VCOCO dataset')
    parser.add_argument('--project-root',
                        default=paths.project_root,
                        help='intermediate result path')
    parser.add_argument('--tmp-root',
                        default=paths.tmp_root,
                        help='intermediate result path')
    parser.add_argument('--data-root',
                        default=paths.vcoco_data_root,
                        help='data path')
    parser.add_argument('--log-root',
                        default=os.path.join(
                            paths.log_root,
                            'vcoco/parsing_{}'.format(feature_type)),
                        help='log files path')
    parser.add_argument(
        '--resume',
        default=os.path.join(
            paths.tmp_root,
            'checkpoints/vcoco/parsing_{}'.format(feature_type)),
        help='path to latest checkpoint')
    parser.add_argument('--eval-root',
                        default=os.path.join(
                            paths.tmp_root,
                            'evaluation/vcoco/{}'.format(feature_type)),
                        help='path to save evaluation file')
    parser.add_argument('--feature-type',
                        default=feature_type,
                        help='feature_type')
    parser.add_argument('--visualize',
                        action='store_true',
                        default=False,
                        help='Visualize final results')
    parser.add_argument('--vis-top-k',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Top k results to visualize')

    # Model parameters
    parser.add_argument('--prop-layer',
                        type=int,
                        default=3,
                        metavar='N',
                        help='Number of propogation layers (default: 3)')
    parser.add_argument('--update-layer',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Number of update hidden layers (default: 1)')
    parser.add_argument('--link-layer',
                        type=int,
                        default=3,
                        metavar='N',
                        help='Number of link hidden layers (default: 3)')
    parser.add_argument('--model-type',
                        type=str,
                        default='V1',
                        help='GPNN model type')
    parser.add_argument('--po-type',
                        type=str,
                        default='mult',
                        help='Part-Object prior type')
    parser.add_argument('--suppress-hh',
                        action='store_true',
                        default=False,
                        help='Suppresses human-human edges')

    # Optimization Options
    # Help texts below state the real defaults (they previously disagreed
    # with the actual default= values).
    parser.add_argument('--batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Input batch size for training (default: 1)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='Enables CUDA training')
    parser.add_argument('--epochs',
                        type=int,
                        default=30,
                        metavar='N',
                        help='Number of epochs to train (default: 30)')
    parser.add_argument('--start-epoch',
                        type=int,
                        default=0,
                        metavar='N',
                        help='Index of epoch to start (default: 0)')
    parser.add_argument('--link-weight',
                        type=float,
                        default=2,
                        metavar='N',
                        help='Loss weight of existing edges')
    parser.add_argument(
        '--lr',
        type=lambda x: restricted_float(x, [1e-5, 1e-2]),
        default=1e-4,
        metavar='LR',
        help='Initial learning rate [1e-5, 1e-2] (default: 1e-4)')
    parser.add_argument(
        '--lr-decay',
        type=lambda x: restricted_float(x, [.01, 1]),
        default=0.8,
        metavar='LR-DECAY',
        help='Learning rate decay factor [.01, 1] (default: 0.8)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')

    # i/o
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='How many batches to wait before logging training status')
    # Accelerating
    parser.add_argument('--prefetch',
                        type=int,
                        default=0,
                        help='Pre-fetching threads.')

    # Debug
    parser.add_argument('--debug',
                        action='store_true',
                        default=False,
                        help='Runs only 10 batch and 1 epoch')

    # Feature
    parser.add_argument('--extra-feature',
                        action='store_true',
                        default=False,
                        help='Include 101-d extra node feature')

    # Negative Rejection
    parser.add_argument('--NRT',
                        action='store_true',
                        default=False,
                        help='Reject negative object-action pairs in training')
    parser.add_argument(
        '--NRE',
        action='store_true',
        default=False,
        help='Reject negative object-action pairs in evaluation')

    # AdjMat Loss
    parser.add_argument('--ignore-adj',
                        action='store_true',
                        default=False,
                        help='ignores adjacency matrix in loss function')

    return parser.parse_args()
コード例 #24
0
Sample Human from the Scene

"""
import config
import os
import pickle
from scipy.io import loadmat
import numpy as np
import matplotlib.pyplot as plt
import visualize
import copy
from camera import rotation_matrix_3d_z, center_to_corners
import random
import sys
from PIL import Image
# Shared path configuration, resolved once at import time.
paths = config.Paths()
metadata_root = paths.metadata_root
proposal_root = paths.proposal_root
# Derived directories under the metadata root.
evaluation_root = os.path.join(metadata_root, 'evaluation')
stats_root = os.path.join(metadata_root, 'sunrgbd_stats')
# NOTE(review): presumably the activity labels for office scenes — confirm
# against the consumers of this list.
office_label = ['reading', 'put-down-item', 'take-item', 'play-computer']


class HumanSample(object):
    """Samples human skeletons into a copy of a scene parse graph (pg).

    NOTE(review): only the constructor is visible in this chunk; the
    sampling/adjustment methods presumably live elsewhere in the class.
    """

    def __init__(self, pg, skeleton_stats, save_root='.', if_vis=True):
        # Deep-copy so sampling never mutates the caller's parse graph.
        self.pg = copy.deepcopy(pg)
        self.skeleton_stats = skeleton_stats
        self.learning_rate = 1
        # Output graph starts as another copy and is adjusted in place.
        self.pg_output = copy.deepcopy(pg)
        self.skeleton = list()
        self.adjust_index = list()
        # NOTE(review): save_root and if_vis are accepted but not stored
        # here — confirm whether they are consumed elsewhere.
コード例 #25
0
class RNNAgent(object):
    """Drone agent that selects goals with a shared LSTM Q-network.

    All network state (session, graph tensors, saver) is shared across
    instances via class attributes; each instance only tracks its own
    index, handle, and last state/action/Q-value.

    Fix: the original used Python 2 ``print`` statements, which are
    syntax errors under Python 3 (the rest of this file uses f-strings
    and therefore targets Python 3); converted to ``print()`` calls.
    """

    # Shared bookkeeping across all agent instances.
    total_drone_num = 0
    max_people_tracked = 4
    static_goal_num = 0

    # Average-reward estimate (only consumed by the commented-out
    # continuing-task update rule below).
    r_bar = 0

    # Shared TensorFlow graph/session objects, built in init_lstm().
    lstm = None
    session = None
    train = None
    saver = None

    state_size = 0
    input_size = 0
    max_goal_size = 0
    inputs = None
    q_values_tf = None
    r_tf = None
    delta_tf = None

    # True: act greedily from a restored checkpoint.
    # False: train, and create a timestamped checkpoint directory.
    test = True

    if not test:
        timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d %H:%M:%S')
        os.mkdir(os.path.join(config.Paths().tmp_root, 'rnn', timestamp))

    def __init__(self, index, handle):
        """Register one drone agent.

        index: this agent's slot in the global (x, y) state vector.
        handle: external simulator handle (stored, not used here).
        """
        self.__class__.total_drone_num += 1
        self._index = index
        self._handle = handle

        # Greedy when testing, epsilon-greedy (0.1) when training.
        self.epsilon = 0.0 if self.__class__.test else 0.1

        self.last_state = None
        self.last_q = 0
        self.last_action = -1

    @classmethod
    def set_static_goals(cls, goals):
        """Record how many fixed (non-human) goals exist.

        goals: three containers of static goals; only their sizes matter.
        """
        cls.static_goal_num = len(goals[0]) + len(goals[1]) + len(goals[2])

    @classmethod
    def init_lstm(cls):
        """Build the shared LSTM Q-network graph and TF session.

        Must run after all agents are constructed and set_static_goals()
        has been called, since the input size depends on total_drone_num
        and static_goal_num.
        """
        cls.max_goal_size = cls.static_goal_num + cls.max_people_tracked

        # 2-D position for every other drone and every tracked person.
        cls.state_size = 2 * (cls.total_drone_num + cls.max_people_tracked - 1)
        cls.input_size = cls.state_size + cls.max_goal_size
        cls.inputs = tf.placeholder(tf.float32, [None, None, cls.input_size],
                                    name='input')

        lstm = tf.contrib.rnn.LSTMCell(num_units=RNNConfig.h_size,
                                       state_is_tuple=True)
        batch_size = tf.shape(cls.inputs)[1]
        initial_state = lstm.zero_state(batch_size, tf.float32)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
            lstm, cls.inputs, initial_state=initial_state, time_major=True)
        # Two hidden FC layers, then a linear scalar Q-value head.
        cls.q_values_tf = tf.contrib.layers.fully_connected(
            rnn_outputs, num_outputs=128, activation_fn=tf.nn.relu)
        cls.q_values_tf = tf.contrib.layers.fully_connected(
            cls.q_values_tf, num_outputs=32, activation_fn=tf.nn.relu)
        # cls.q_values_tf = tf.contrib.layers.fully_connected(cls.q_values_tf, num_outputs=1, activation_fn=tf.nn.tanh)
        cls.q_values_tf = tf.contrib.layers.fully_connected(cls.q_values_tf,
                                                            num_outputs=1,
                                                            activation_fn=None)

        # Semi-gradient update: the negated Q-gradient is scaled by the
        # TD error `delta` supplied at run time.
        cls.delta_tf = tf.placeholder(tf.float32, name='delta')
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=RNNConfig.alpha)
        grads_and_vars = optimizer.compute_gradients(cls.q_values_tf)
        grads_and_vars = [(-gv[0] * cls.delta_tf, gv[1])
                          for gv in grads_and_vars]
        cls.train = optimizer.apply_gradients(grads_and_vars)

        # # Direct approximation of reward
        # cls.r_tf = tf.placeholder(tf.float32, name='r')
        # loss = tf.square(cls.q_values_tf - cls.r_tf)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=RNNConfig.alpha)
        # grads_and_vars = optimizer.compute_gradients(loss)
        # cls.train = optimizer.apply_gradients(grads_and_vars)

        cls.session = tf.Session()
        cls.session.run(tf.global_variables_initializer())

        # file_writer = tf.summary.FileWriter('/home/siyuan/Downloads/test/log', cls.session.graph)
        cls.saver = tf.train.Saver()

        if cls.test:
            cls.saver.restore(
                cls.session,
                os.path.join(config.Paths().tmp_root, 'rnn',
                             '2017-09-08 18:24:57',
                             'model_0190000_249_1016.ckpt'),
            )

    def act(self, goals, state, reward, episode_done=False):
        """Pick a goal for this agent; in training, also update the net.

        goals: sequence of dicts mapping goal key -> goal value.
        state: global state; state[1] is the flat (x, y) position list.
        reward: reward observed since the previous action; a nonzero
            reward triggers a TD-style update when training.
        Returns the chosen goal key.
        """
        # Mostly stick with the previous action until a reward arrives
        # (with a small random chance of re-deciding early).
        if reward == 0 and np.random.randint(
                500) != 1 and self.last_action != -1:
            return self.last_action

        goals_list = [
            k for i in range(len(goals)) for k, v in goals[i].items()
        ]

        agent_state = state[1][:]
        # Swap the current agent's position to the first
        agent_state[0], agent_state[self._index *
                                    2] = agent_state[self._index *
                                                     2], agent_state[0]
        agent_state[1], agent_state[self._index * 2 +
                                    1] = agent_state[self._index * 2 +
                                                     1], agent_state[1]

        lstm_input = np.zeros((1, 1, self.__class__.input_size))
        if len(agent_state) < self.__class__.state_size:
            lstm_input[0, 0, :len(agent_state)] = np.array(agent_state)
        else:
            lstm_input[0, 0, :self.__class__.state_size] = np.array(
                agent_state)[:self.__class__.state_size]
        # Normalize coordinates (presumably the map is 210 units wide —
        # TODO confirm against the simulator).
        lstm_input /= 210.

        # RNN outputs Q(s, a): score each candidate goal by appending a
        # one-hot goal encoding to the state vector.
        q_values = np.zeros(len(goals_list)) if len(
            goals_list) < self.__class__.max_goal_size else np.zeros(
                self.__class__.max_goal_size)
        for i in range(len(q_values)):
            lstm_input[0, 0, self.__class__.state_size:] = 0
            lstm_input[0, 0, self.__class__.state_size + i] = 1
            q_values[i] = self.__class__.session.run(
                self.__class__.q_values_tf,
                {self.__class__.inputs: lstm_input})[0][0]

        # Epsilon Greedy (ties broken uniformly at random)
        if np.random.random() < self.epsilon:
            a = np.random.choice(len(q_values))
        else:
            a = random.choice(
                np.argwhere(q_values == np.max(q_values)).flatten())
        # lstm_input[0, 0, -1] = goals_list[a]
        lstm_input[0, 0, self.__class__.state_size:] = 0
        lstm_input[0, 0, self.__class__.state_size + a] = 1

        # # Boltzmann distribution
        # softmax_q_values = np.exp(q_values)
        # if np.isinf(sum(softmax_q_values)):
        #     softmax_q_values = [1./len(q_values) for _ in range(len(q_values))]
        # else:
        #     softmax_q_values /= sum(softmax_q_values)
        # a = np.random.choice(len(softmax_q_values), p=softmax_q_values)
        # lstm_input[0, 0, -1] = goals_list[a]

        # Update network
        if reward != 0 and self.epsilon != 0:
            # delta = reward - self.__class__.r_bar + q_values[a] - self.last_q  # Continuing
            if episode_done:
                print('Episode done!')
                delta = reward - q_values[a]
            else:
                # NOTE(review): a standard episodic TD error would be
                # reward + gamma * q_values[a] - self.last_q; the minus
                # sign before gamma is preserved from the original —
                # confirm whether it is intentional.
                delta = reward - RNNConfig.gamma * q_values[
                    a] - self.last_q  # Episodic
            self.__class__.r_bar += RNNConfig.beta * delta
            self.__class__.session.run(
                self.__class__.train, {
                    self.__class__.inputs: self.last_state,
                    self.__class__.delta_tf: delta
                })
            # self.__class__.session.run(self.__class__.train, {self.__class__.inputs: self.last_state, self.__class__.r_tf: reward})
            print('reward', reward, 'r_bar', self.__class__.r_bar, 'a', a,
                  'q', q_values[a], 'last_q', self.last_q, 'delta', delta,
                  self.last_state)
            print(q_values)

        self.last_state = lstm_input
        self.last_q = q_values[a]
        self.last_action = goals_list[a]
        print(self._index, 'choose action', self.last_action)
        return goals_list[a]

    @classmethod
    def save_model(cls, iteration, performance):
        """Checkpoint the shared network (training mode only)."""
        if not cls.test:
            paths = config.Paths()
            cls.saver.save(
                cls.session,
                os.path.join(
                    paths.tmp_root, 'rnn', cls.timestamp,
                    'model_{:07d}_{:03d}_{:03d}.ckpt'.format(
                        iteration, performance[0], performance[1])))
コード例 #26
0
def main():
    """Run the plotting demo and report the elapsed wall-clock time."""
    project_paths = config.Paths()
    tic = time.time()
    plot_demo(project_paths)
    # rgb_videos(project_paths)
    elapsed = time.time() - tic
    print('Time elapsed: {}'.format(elapsed))
コード例 #27
0
def main():
    """Collect data and print the elapsed wall-clock seconds."""
    cfg_paths = config.Paths()
    begun = time.time()
    collect_data(cfg_paths)
    elapsed = time.time() - begun
    print('Time elapsed: {}s'.format(elapsed))
コード例 #28
0
def main():
    """Learn affordance distributions from the configured dataset paths."""
    project_paths = config.Paths()
    # learn_furniture_wall_relationships(project_paths)
    learn_affordance_distributions(project_paths)
コード例 #29
0
def parse_arguments():
    """Build and parse the command-line arguments for CAD-120 prediction.

    Returns the parsed argparse.Namespace.

    Fixes: restricted_float now reports the actual interval instead of
    a hard-coded "[1e-5, 1e-4]"; several help strings were corrected to
    match the real defaults, and --no-cuda's help no longer claims to
    enable CUDA.
    """

    # Parser check
    def restricted_float(x, inter):
        """Parse x as a float and require inter[0] <= x <= inter[1]."""
        x = float(x)
        if x < inter[0] or x > inter[1]:
            # Report the actual interval passed in, not a fixed one.
            raise argparse.ArgumentTypeError("%r not in range [%r, %r]" %
                                             (x, inter[0], inter[1]))
        return x

    paths = config.Paths()

    # Path settings
    parser = argparse.ArgumentParser(description='CAD 120 dataset')
    parser.add_argument('--project-root',
                        default=paths.project_root,
                        help='project root path')
    parser.add_argument('--tmp-root',
                        default=paths.tmp_root,
                        help='intermediate result path')
    parser.add_argument('--log-root',
                        default=os.path.join(paths.log_root,
                                             'cad120/prediction'),
                        help='log files path')
    parser.add_argument('--resume',
                        default=os.path.join(paths.tmp_root,
                                             'checkpoints/cad120/prediction'),
                        help='path to latest checkpoint')
    # NOTE(review): store_true with default=True means this flag can
    # never be turned off from the command line — kept as-is to avoid
    # changing behavior callers may depend on.
    parser.add_argument('--visualize',
                        action='store_true',
                        default=True,
                        help='Visualize final results')

    # Optimization Options
    parser.add_argument('--batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='Input batch size for training (default: 1)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='Disables CUDA training')
    parser.add_argument('--epochs',
                        type=int,
                        default=0,
                        metavar='N',
                        help='Number of epochs to train (default: 0)')
    parser.add_argument('--start-epoch',
                        type=int,
                        default=0,
                        metavar='N',
                        help='Index of epoch to start (default: 0)')
    parser.add_argument(
        '--lr',
        type=lambda x: restricted_float(x, [1e-5, 1e-2]),
        default=5e-5,
        metavar='LR',
        help='Initial learning rate [1e-5, 1e-2] (default: 5e-5)')
    parser.add_argument(
        '--lr-decay',
        type=lambda x: restricted_float(x, [.01, 1]),
        default=0.8,
        metavar='LR-DECAY',
        help='Learning rate decay factor [.01, 1] (default: 0.8)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')

    # i/o
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='How many batches to wait before logging training status')
    # Accelerating
    parser.add_argument('--prefetch',
                        type=int,
                        default=0,
                        help='Pre-fetching threads.')

    return parser.parse_args()
コード例 #30
0
def main():
    # Run the per-frame sequential test (without segmentation) on the
    # dataset selected by the Huang path configuration.
    paths = config.Paths()
    paths.path_huang()
    without_segmentation_sequence_test_per_frame_sequential(
        paths.data_root, paths.metadata_root)