Code Example #1
def k_validation(args, features, bag_labels, k_valid=5):
    """
    Uses k_cross_validation to evaluate model
    :param args: arguments from parser [parser]
    :param features: list of bags  [list]
    :param bag_labels: list of bag labels [list]
    :return:
    """
    accuracies = []
    # evaluate only the single fold selected via args.valid_iter
    if 'validate' in args.split and args.valid_iter <= k_valid:
        cur_iteration = args.valid_iter
        x_train, x_val, y_train, y_val = batch_set(features, bag_labels,
                                                   cur_iteration, k_valid)
        model = MIL(args)
        model.fit(x_train, y_train)
        y_pred, y_instance_pred = model.predict(x_val)
        rec, prec, acc, f1 = calculate_metrics(y_pred, y_val, args.cm)
        print('Acc={}'.format(acc))
        return acc
    else:
        for cur_iteration in range(k_valid):
            x_train, x_val, y_train, y_val = batch_set(features, bag_labels,
                                                       cur_iteration, k_valid)
            model = MIL(args)
            model.fit(x_train, y_train)
            y_pred, y_instance_pred = model.predict(x_val)
            rec, prec, acc, f1 = calculate_metrics(y_pred, y_val, args.cm)
            accuracies.append(acc)
            print('Acc={}'.format(acc))
        mean = average(accuracies)
        print('Result of k-validation: mean = {}, std={}'.format(
            mean, standard_deviaton(accuracies, mean)))
        return mean
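The batch_set helper used above is not shown in this example. A minimal sketch of a k-fold splitter with that signature, assuming the bags are held in plain lists (the implementation is a guess, not the project's actual code):

def batch_set(features, bag_labels, cur_iteration, k_valid):
    # Hypothetical k-fold splitter: hold out fold `cur_iteration` of `k_valid`.
    fold_size = len(features) // k_valid
    start, end = cur_iteration * fold_size, (cur_iteration + 1) * fold_size
    x_val, y_val = features[start:end], bag_labels[start:end]
    x_train = features[:start] + features[end:]
    y_train = bag_labels[:start] + bag_labels[end:]
    return x_train, x_val, y_train, y_val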
Code Example #2
def main():
    model = MIL(hw=FLAGS.hw,
                ch=FLAGS.ch,
                attn_dim=FLAGS.attn_dim,
                use_attn=FLAGS.use_attn,
                use_gate=FLAGS.use_gate,
                wd=FLAGS.wd,
                lr=FLAGS.lr)
    opt_dict = model.get_opt_dict()
    train(opt_dict)
    return
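FLAGS here is presumably TensorFlow 1.x's flag module. A hedged sketch of how the flags this example reads might be declared (the defaults are placeholders, not values from the project):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_integer('hw', 28, 'input height/width')
flags.DEFINE_integer('ch', 1, 'number of input channels')
flags.DEFINE_integer('attn_dim', 128, 'attention embedding dimension')
flags.DEFINE_bool('use_attn', True, 'use attention pooling')
flags.DEFINE_bool('use_gate', False, 'use gated attention')
flags.DEFINE_float('wd', 5e-4, 'weight decay')
flags.DEFINE_float('lr', 1e-4, 'learning rate')
FLAGS = flags.FLAGS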
Code Example #3
def run(args, dataset):
    accuracies = []
    for _ in range(5):  # '_' avoids shadowing the enclosing run()
        dataset.random_shuffle()
        x_train, y_train = dataset.return_training_set()
        x_test, y_test = dataset.return_testing_set()
        model = MIL(args)
        model.fit(x_train, y_train)
        y_pred, y_instance_pred = model.predict(x_test)
        rec, prec, acc, f1 = calculate_metrics(y_pred, y_test, args.cm)
        accuracies.append(acc)
        print('Acc={}'.format(acc))
    mean = average(accuracies)
    std_dev = standard_deviaton(accuracies, mean)
    print('Result of evaluation: mean = {}, std={}'.format(mean, std_dev))
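average and standard_deviaton (spelled as in the calls above) are project helpers that the snippet does not include; they could plausibly be as simple as:

import math

def average(values):
    # arithmetic mean of a list of numbers
    return sum(values) / len(values)

def standard_deviaton(values, mean):
    # population standard deviation around a precomputed mean
    return math.sqrt(sum((v - mean) ** 2 for v in values) / len(values))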
Code Example #4
def main(argv):
    if len(argv) < 1:
        printFiles()
        return  # no filename was given; avoid an IndexError on argv[0]
    filename = argv[0]
    video_filepath, data = parseOneVideo(filename)
    labels = crop(data[0])
    MILlist = [MIL(video_filepath, 0, label) for label in labels]
    KCFlist = [kcf.KCF(video_filepath, 0, label) for label in labels]
    # showAll(MILlist)
    kcf.showAll(KCFlist + MILlist)
Code Example #5
def main(argv):
    if len(argv) < 1:
        printFiles()
        return  # no filename was given; avoid an IndexError on argv[0]
    filename = argv[0]
    video_filepath, data = parseOneVideo(filename)
    # HOG people detector; initialized here but not applied in this snippet
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    labels = crop(data[0])
    MILlist = [MIL(video_filepath, 0, label) for label in labels]
    KCFlist = [kcf.KCF(video_filepath, 0, label) for label in labels]
    # showAll(MILlist)
    kcf.showAll(KCFlist + MILlist)
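The HOG people detector built in this variant is never applied within the snippet. If it were used, the standard OpenCV call would look roughly like this (frame is a hypothetical BGR image):

rects, weights = hog.detectMultiScale(frame, winStride=(8, 8), scale=1.05)
for (x, y, w, h) in rects:
    # draw a green box around each detected person
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)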
Code Example #6
def train(args, dataset):
    x_train, y_train = dataset.return_training_set()
    # model_path(args) is assumed to return a path fragment that starts with a separator
    filepath = os.getcwd() + model_path(args)
    model = MIL(args)
    with open(filepath, 'wb') as model_file:
        model.fit(x_train, y_train)
        pickle.dump(model, model_file)
    y_pred, y_instance_pred = model.predict(x_train)
    if args.v:
        loss = model.return_loss_history()
        visualize_loss(args, loss)
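Because train persists the fitted model with pickle, evaluation code elsewhere can presumably restore it the same way. A sketch, reusing the same model_path helper assumed above:

def load_trained_model(args):
    # mirror of the save-path construction in train()
    filepath = os.getcwd() + model_path(args)
    with open(filepath, 'rb') as model_file:
        return pickle.load(model_file)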
Code Example #7
File: misvm.py  Project: jjjkkkjjj/machine-learning
# activate milpy35
import misvm  # needed below for misvm.miSVM
from mil import MIL

method = 'img'
dir_name = 'img-misvm'
kernel = 'rbf'
gamma = 0.0012
C = 250
experience = '2018'
positiveCsvFileName = 'hard-video.csv'
negativeCsvFileName = 'easy-video.csv'
path = './result/{0}/{1}/g{2}/c{3}'.format(experience, dir_name, gamma, C)
dicimate = 4

mil = MIL(method=method,
          experience=experience,
          dirName=dir_name,
          estimatorName='misvm')
mil.setData(positiveCsvFileName=positiveCsvFileName,
            negativeCsvFileName=negativeCsvFileName,
            saveMotionTmplate=False,
            dicimate=dicimate,
            videoExtension='mp4',
            csvExtension='csv')
#mil.importCsv2Feature(positiveCsvFileName, negativeCsvFileName, dicimate, data='all')


def main():  # read hard and easy
    estimator = misvm.miSVM(kernel=kernel,
                            gamma=gamma,
                            C=C,
                            verbose=True,
Code Example #8
def main():
    tf.set_random_seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)
    random.seed(FLAGS.random_seed)
    # Build up environment to prevent segfault
    if not FLAGS.train:
        if 'reach' in FLAGS.experiment:
            env = gym.make('ReacherMILTest-v1')
            ob = env.reset()
            # import pdb; pdb.set_trace()
    graph = tf.Graph()
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(graph=graph, config=tf_config)
    network_config = {
        'num_filters': [FLAGS.num_filters] * FLAGS.num_conv_layers,
        'strides': [[1, 2, 2, 1]] * FLAGS.num_strides +
                   [[1, 1, 1, 1]] * (FLAGS.num_conv_layers - FLAGS.num_strides),
        'filter_size': FLAGS.filter_size,
        'image_width': FLAGS.im_width,
        'image_height': FLAGS.im_height,
        'image_channels': FLAGS.num_channels,
        'n_layers': FLAGS.num_fc_layers,
        'layer_size': FLAGS.layer_size,
        'initialization': FLAGS.init,
    }
    data_generator = DataGenerator()
    state_idx = data_generator.state_idx
    img_idx = range(len(state_idx),
                    len(state_idx) + FLAGS.im_height * FLAGS.im_width * FLAGS.num_channels)
    # need to compute x_idx and img_idx from data_generator
    model = MIL(data_generator._dU,
                state_idx=state_idx,
                img_idx=img_idx,
                network_config=network_config)
    # TODO: figure out how to save summaries and checkpoints
    exp_string = FLAGS.experiment + '.' + FLAGS.init + '_init.' + str(FLAGS.num_conv_layers) + '_conv' + '.' + str(FLAGS.num_strides) + '_strides' + '.' + str(FLAGS.num_filters) + '_filters' + \
                '.' + str(FLAGS.num_fc_layers) + '_fc' + '.' + str(FLAGS.layer_size) + '_dim' + '.bt_dim_' + str(FLAGS.bt_dim) + '.mbs_' + str(FLAGS.meta_batch_size) + \
                '.ubs_' + str(FLAGS.update_batch_size) + '.numstep_' + str(FLAGS.num_updates) + '.updatelr_' + str(FLAGS.train_update_lr)

    if FLAGS.clip:
        exp_string += '.clip_' + str(int(FLAGS.clip_max))
    if FLAGS.conv_bt:
        exp_string += '.conv_bt'
    if FLAGS.all_fc_bt:
        exp_string += '.all_fc_bt'
    if FLAGS.fp:
        exp_string += '.fp'
    if FLAGS.learn_final_eept:
        exp_string += '.learn_ee_pos'
    if FLAGS.no_action:
        exp_string += '.no_action'
    if FLAGS.zero_state:
        exp_string += '.zero_state'
    if FLAGS.two_head:
        exp_string += '.two_heads'
    if FLAGS.two_arms:
        exp_string += '.two_arms'
    if FLAGS.temporal_conv_2_head:
        exp_string += '.1d_conv_act_' + str(FLAGS.temporal_num_layers) + '_' + str(FLAGS.temporal_num_filters)
        if FLAGS.temporal_conv_2_head_ee:
            exp_string += '_ee_' + str(FLAGS.temporal_num_layers_ee) + '_' + str(FLAGS.temporal_num_filters_ee)
        exp_string += '_' + str(FLAGS.temporal_filter_size) + 'x1_filters'
    if FLAGS.training_set_size != -1:
        exp_string += '.' + str(FLAGS.training_set_size) + '_trials'

    log_dir = FLAGS.log_dirs + '/' + exp_string

    # put here for now
    if FLAGS.train:
        data_generator.generate_batches(noisy=FLAGS.use_noisy_demos)
        with graph.as_default():
            # train_image_tensors = data_generator.make_batch_tensor(network_config, restore_iter=FLAGS.restore_iter)
            train_image_tensors = data_generator.make_compare_batch_tensor(
                network_config, restore_iter=FLAGS.restore_iter)

            inputa = train_image_tensors[:, :FLAGS.update_batch_size * FLAGS.T, :]
            inputb = train_image_tensors[:, FLAGS.update_batch_size * FLAGS.T:(FLAGS.update_batch_size + 1) * FLAGS.T, :]
            inputc = train_image_tensors[:, (FLAGS.update_batch_size + 1) * FLAGS.T:, :]

            # train_input_tensors = {'inputa': inputa, 'inputb': inputb}
            train_input_tensors = {
                'inputa': inputa,
                'inputb': inputb,
                'inputc': inputc
            }

            # val_image_tensors = data_generator.make_batch_tensor(network_config, restore_iter=FLAGS.restore_iter, train=False)
            # inputa = val_image_tensors[:, :FLAGS.update_batch_size*FLAGS.T, :]
            # inputb = val_image_tensors[:, FLAGS.update_batch_size*FLAGS.T:, :]
            # val_input_tensors = {'inputa': inputa, 'inputb': inputb}
        model.init_network(graph,
                           input_tensors=train_input_tensors,
                           restore_iter=FLAGS.restore_iter)
        # model.init_network(graph, input_tensors=val_input_tensors, restore_iter=FLAGS.restore_iter, prefix='Validation_')
    else:
        model.init_network(graph, prefix='Testing')
    with graph.as_default():
        # Set up saver.
        saver = tf.train.Saver(max_to_keep=10)
        # Initialize variables.
        init_op = tf.global_variables_initializer()
        sess.run(init_op, feed_dict=None)
        # Start queue runners (used for loading videos on the fly)
        tf.train.start_queue_runners(sess=sess)

    if FLAGS.resume:
        model_file = tf.train.latest_checkpoint(log_dir)
        if FLAGS.restore_iter > 0:
            model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
        if model_file:
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1 + 6:])
            print("Restoring model weights from " + model_file)
            with graph.as_default():
                saver.restore(sess, model_file)
    if FLAGS.train:
        train(graph,
              model,
              saver,
              sess,
              data_generator,
              log_dir,
              restore_itr=FLAGS.restore_iter)

    else:
        model_file = tf.train.latest_checkpoint(log_dir)
        if (FLAGS.begin_restore_iter != FLAGS.end_restore_iter):
            iter_index = FLAGS.begin_restore_iter
            while iter_index <= FLAGS.end_restore_iter:
                print('iter_index', iter_index)
                if FLAGS.restore_iter >= 0:
                    model_file = model_file[:model_file.index('model')] + 'model_' + str(iter_index)
                if model_file:
                    ind1 = model_file.index('model')
                    resume_itr = int(model_file[ind1 + 6:])
                    print("Restoring model weights from " + model_file)
                    # saver = tf.train.Saver()
                    saver.restore(sess, model_file)
                if 'reach' in FLAGS.experiment:
                    env = gym.make('ReacherMILTest-v1')
                    env.reset()
                    generate_test_demos(data_generator)
                    evaluate_vision_reach(env, graph, model, data_generator,
                                          sess, exp_string, FLAGS.record_gifs,
                                          log_dir)

                    # evaluate_rl_vision_reach(graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dirs)
                elif 'push' in FLAGS.experiment:
                    evaluate_push(sess,
                                  graph,
                                  model,
                                  data_generator,
                                  exp_string,
                                  log_dir,
                                  FLAGS.demo_file + '/',
                                  save_video=FLAGS.record_gifs)
                iter_index += 100
        else:
            if FLAGS.restore_iter > 0:
                model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
            if model_file:
                ind1 = model_file.index('model')
                resume_itr = int(model_file[ind1 + 6:])
                print("Restoring model weights from " + model_file)
                # saver = tf.train.Saver()
                saver.restore(sess, model_file)
            if 'reach' in FLAGS.experiment:
                env = gym.make('ReacherMILTest-v1')
                env.reset()
                generate_test_demos(data_generator)
                evaluate_vision_reach(env, graph, model, data_generator, sess,
                                      exp_string, FLAGS.record_gifs, log_dir)
                # evaluate_vision_reach(env, graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dir)
                # evaluate_rl_vision_reach(graph, data_generator, sess, exp_string, FLAGS.record_gifs, log_dirs)
            elif 'push' in FLAGS.experiment:
                evaluate_push(sess,
                              graph,
                              model,
                              data_generator,
                              exp_string,
                              log_dir,
                              FLAGS.demo_file + '/',
                              save_video=FLAGS.record_gifs)
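The resume logic above rewrites the path returned by tf.train.latest_checkpoint with string surgery. Traced with a hypothetical checkpoint path:

model_file = '/logs/exp_string/model_42000'    # hypothetical latest checkpoint
stem = model_file[:model_file.index('model')]  # '/logs/exp_string/'
model_file = stem + 'model_' + str(30000)      # repoint at iteration 30000
resume_itr = int(model_file[model_file.index('model') + 6:])  # 30000; '+ 6' skips past 'model_'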
Code Example #9
File: main.py  Project: asilx/one-shot-neem-training
def main():
    tf.set_random_seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)
    random.seed(FLAGS.random_seed)

    graph = tf.Graph()
    #  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    #  tf_config = tf.ConfigProto(gpu_options=gpu_options)
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(graph=graph, config=tf_config)
    network_config = {
        'num_filters': [FLAGS.num_filters]*FLAGS.num_conv_layers,
        'strides': [[1, 2, 2, 1]]*FLAGS.num_strides + [[1, 1, 1, 1]]*(FLAGS.num_conv_layers-FLAGS.num_strides),
        'filter_size': FLAGS.filter_size,
        'image_width': FLAGS.im_width,
        'image_height': FLAGS.im_height,
        'image_channels': FLAGS.num_channels,
        'n_layers': FLAGS.num_fc_layers,
        'layer_size': FLAGS.layer_size,
        'initialization': FLAGS.init,
    }

    data_generator = DataGenerator()
    if FLAGS.train:
        state_idx = data_generator.state_idx
    else:
        state_idx = range(7)
        data_generator._dU = 7
    img_idx = range(len(state_idx), len(state_idx)+FLAGS.im_height*FLAGS.im_width*FLAGS.num_channels)
    model = MIL(data_generator._dU, state_idx=state_idx, img_idx=img_idx, network_config=network_config)

    log_dir = os.path.join(FLAGS.data_path, 'logged_model')

    if FLAGS.train:
        with graph.as_default():
            #  train_image_tensors = data_generator.make_batch_tensor(network_config)
            #  inputa = train_image_tensors[:, :FLAGS.number_of_shot*FLAGS.TimeFrame, :]
            #  inputb = train_image_tensors[:, FLAGS.number_of_shot*FLAGS.TimeFrame:, :]
            inputa, inputb = data_generator.make_batch_tensor(network_config)
            train_input_tensors = {'inputa': inputa, 'inputb': inputb}
            #  val_image_tensors = data_generator.make_batch_tensor(network_config, train=False)
            #  inputa = val_image_tensors[:, :FLAGS.number_of_shot*FLAGS.TimeFrame, :]
            #  inputb = val_image_tensors[:, FLAGS.number_of_shot*FLAGS.TimeFrame:, :]
            inputa, inputb = data_generator.make_batch_tensor(network_config, train=False)
            val_input_tensors = {'inputa': inputa, 'inputb': inputb}
        model.init_network(graph, input_tensors=train_input_tensors)
        model.init_network(graph, input_tensors=val_input_tensors, prefix='Validation_')
    else:
        model.init_network(graph, prefix='Testing')
    with graph.as_default():
        # Set up saver.
        saver = tf.train.Saver(max_to_keep=10)
        # Initialize variables.
        init_op = tf.global_variables_initializer()
        sess.run(init_op, feed_dict=None)
        # Start queue runners (used for loading videos on the fly)
        tf.train.start_queue_runners(sess=sess)
    if FLAGS.resume:
        model_file = tf.train.latest_checkpoint(log_dir)
        print(model_file)
        #if FLAGS.restore_iter > 0:
        #    model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
        if model_file:
            with graph.as_default():
                saver.restore(sess, model_file)
    if FLAGS.train:
        train(graph, model, saver, sess, data_generator, log_dir)
    else:
        robot_data_path = os.path.join(FLAGS.data_path, 'low_res_robot_data')
        load_one_shot_data_from_path(robot_data_path, data_generator, network_config)
        if FLAGS.experiment == 'reaching':
            control_robot(graph, model, data_generator, sess, 'reach', log_dir)
        else:
            control_robot(graph, model, data_generator, sess, 'push', log_dir)
Code Example #10
    'n_layers': FLAGS.num_fc_layers,
    'layer_size': FLAGS.layer_size,
    'initialization': FLAGS.init,
    'temporal_conv_2_head_ee': FLAGS.temporal_conv_2_head_ee,
}
data_generator = DataGenerator()
state_idx = data_generator.state_idx
img_idx = range(len(state_idx),
                len(state_idx) + FLAGS.im_height * FLAGS.im_width * FLAGS.num_channels)
model = MIL(data_generator._dU,
            state_idx=state_idx,
            img_idx=img_idx,
            network_config=network_config)
model.init_network(graph, prefix='Testing')


exp_string = FLAGS.experiment + '.' + FLAGS.init + '_init.' + str(FLAGS.num_conv_layers) + '_conv' + '.' + str(FLAGS.num_strides) + '_strides' + '.' + str(FLAGS.num_filters) + '_filters' + \
            '.' + str(FLAGS.num_fc_layers) + '_fc' + '.' + str(FLAGS.layer_size) + '_dim' + '.bt_dim_' + str(FLAGS.bt_dim) + '.mbs_'+str(FLAGS.meta_batch_size) + \
            '.ubs_' + str(FLAGS.update_batch_size) + '.numstep_' + str(FLAGS.num_updates) + '.updatelr_' + str(FLAGS.train_update_lr)

if FLAGS.clip:
    exp_string += '.clip_' + str(int(FLAGS.clip_max))
if FLAGS.conv_bt:
    exp_string += '.conv_bt'
if FLAGS.all_fc_bt:
    exp_string += '.all_fc_bt'
if FLAGS.fp:
Code Example #11
def main():
    tf.set_random_seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)
    random.seed(FLAGS.random_seed)
#     # Build up environment to prevent segfault
#     if not FLAGS.train:
#         if 'reach' in FLAGS.experiment:
#             env = gym.make('Reacher-v2')
#             ob = env.reset()
#             # import pdb; pdb.set_trace()
    graph = tf.Graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)
    sess = tf.Session(graph=graph, config=tf_config)
    network_config = {
        'num_filters': [FLAGS.num_filters]*FLAGS.num_conv_layers,
        'strides': [[1, 2, 2, 1]]*FLAGS.num_strides + [[1, 1, 1, 1]]*(FLAGS.num_conv_layers-FLAGS.num_strides),
        'filter_size': FLAGS.filter_size,
        'image_width': FLAGS.im_width,
        'image_height': FLAGS.im_height,
        'image_channels': FLAGS.num_channels,
        'n_layers': FLAGS.num_fc_layers,
        'layer_size': FLAGS.layer_size,
        'initialization': FLAGS.init,
        'temporal_conv_2_head_ee': FLAGS.temporal_conv_2_head_ee,
    }
    data_generator = DataGenerator()
    state_idx = data_generator.state_idx
    img_idx = range(len(state_idx), len(state_idx)+FLAGS.im_height*FLAGS.im_width*FLAGS.num_channels)
    # need to compute x_idx and img_idx from data_generator
    if FLAGS.experiment == 'target_vision_reach':
        model = MTL(data_generator._dU, data_generator._dT, state_idx=state_idx, img_idx=img_idx, network_config=network_config)
    else:
        model = MIL(data_generator._dU, data_generator._dT, state_idx=state_idx, img_idx=img_idx, network_config=network_config)
    # TODO: figure out how to save summaries and checkpoints
    exp_string = FLAGS.experiment + '.' + FLAGS.init + '_init.' + str(FLAGS.num_conv_layers) + '_conv' + '.' + str(FLAGS.num_strides) + '_strides' + '.' + str(FLAGS.num_filters) + '_filters' + \
                '.' + str(FLAGS.num_fc_layers) + '_fc' + '.' + str(FLAGS.layer_size) + '_dim' + '.bt_dim_' + str(FLAGS.bt_dim) + '.mbs_' + str(FLAGS.meta_batch_size) + \
                '.ubs_' + str(FLAGS.update_batch_size) + '.numstep_' + str(FLAGS.num_updates) + '.updatelr_' + str(FLAGS.train_update_lr)

    if FLAGS.clip:
        exp_string += '.clip_' + str(int(FLAGS.clip_max))
    if FLAGS.conv_bt:
        exp_string += '.conv_bt'
    if FLAGS.all_fc_bt:
        exp_string += '.all_fc_bt'
    if FLAGS.fp:
        exp_string += '.fp'
    if FLAGS.learn_final_eept:
        exp_string += '.learn_ee_pos'
    if FLAGS.no_action:
        exp_string += '.no_action'
    if FLAGS.zero_state:
        exp_string += '.zero_state'
    if FLAGS.two_head:
        exp_string += '.two_heads'
    if FLAGS.two_arms:
        exp_string += '.two_arms'
    if FLAGS.temporal_conv_2_head:
        exp_string += '.1d_conv_act_' + str(FLAGS.temporal_num_layers) + '_' + str(FLAGS.temporal_num_filters)
        if FLAGS.temporal_conv_2_head_ee:
            exp_string += '_ee_' + str(FLAGS.temporal_num_layers_ee) + '_' + str(FLAGS.temporal_num_filters_ee)
        exp_string += '_' + str(FLAGS.temporal_filter_size) + 'x1_filters'
    if FLAGS.training_set_size != -1:
        exp_string += '.' + str(FLAGS.training_set_size) + '_trials'

    log_dir = FLAGS.log_dir + '/' + exp_string

    # put here for now
    if FLAGS.train:
        data_generator.generate_png_batches()
        with graph.as_default():
            train_image_tensors, train_file_tensors = data_generator.make_png_batch_tensor(network_config, restore_iter=FLAGS.restore_iter)
            inputa = train_image_tensors[:, :FLAGS.update_batch_size*FLAGS.T, :]
            inputb = train_image_tensors[:, FLAGS.update_batch_size*FLAGS.T:, :]
            train_input_tensors = {'inputa': inputa, 'inputb': inputb, 'filenames':train_file_tensors}
            val_image_tensors, val_file_tensors = data_generator.make_png_batch_tensor(network_config, restore_iter=FLAGS.restore_iter, train=False)
            inputa = val_image_tensors[:, :FLAGS.update_batch_size*FLAGS.T, :]
            inputb = val_image_tensors[:, FLAGS.update_batch_size*FLAGS.T:, :]
            val_input_tensors = {'inputa': inputa, 'inputb': inputb, 'filenames':val_file_tensors}
        model.init_network(graph, input_tensors=train_input_tensors, restore_iter=FLAGS.restore_iter)
        model.init_network(graph, input_tensors=val_input_tensors, restore_iter=FLAGS.restore_iter, prefix='Validation_')
    else:
        model.init_network(graph, prefix='Testing')
    with graph.as_default():
        # Set up saver.
        saver = tf.train.Saver(max_to_keep=100)
        # Initialize variables.
        init_op = tf.global_variables_initializer()
        sess.run(init_op, feed_dict=None)
        # Start queue runners (used for loading videos on the fly)
        tf.train.start_queue_runners(sess=sess)

    # Load a previously trained model and resume from where you left off
    if FLAGS.resume:
        model_file = tf.train.latest_checkpoint(log_dir)  # latest checkpoint
        # Load a specific checkpoint if a restoration iteration number was given
        if FLAGS.restore_iter > 0:
            model_file = model_file[:model_file.index('model')] + 'model_' + str(FLAGS.restore_iter)
        if model_file:
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1+6:])  # find the iteration from which to resume
            FLAGS.restore_iter = resume_itr
            print("Restoring model weights from " + model_file)
            with graph.as_default():
                saver.restore(sess, model_file)

    if FLAGS.train:
        train(graph, model, saver, sess, data_generator, log_dir, restore_itr=FLAGS.restore_iter)
    else:
        if 'reach' in FLAGS.experiment:
            data_generator.generate_test_demos()
            
            print(data_generator.selected_demo['selected_demoX'][0].shape)
            print(data_generator.selected_demo['selected_demoU'][0].shape)
            print(data_generator.selected_demo['selected_demoO'][0].shape)
        else:
            raise NotImplementedError
Code Example #12
File: MISVM.py  Project: jjjkkkjjj/machine-learning
# activate milpy35
import misvm  # needed below for misvm.MISVM
from mil import MIL

method = 'img'
dir_name = 'img-MISVM-focusEasy'
kernel = 'rbf'
gamma = 0.0012
C = 1000
sample_num_per_label = 0
experience = '2018'
path = './result/{0}/{1}/g{2}/c{3}'.format(experience, dir_name, gamma, C)
dicimate = 4
person = []

mil = MIL(method=method,
          experience=experience,
          dirName=dir_name,
          estimatorName='MISVM')
mil.setData(positiveCsvFileName='easy-video.csv',
            negativeCsvFileName='hard-video.csv',
            saveMotionTmplate=False,
            dicimate=dicimate,  # reuse the module-level setting above
            videoExtension='mp4',
            csvExtension='csv')


def main():  # read hard and easy
    estimator = misvm.MISVM(kernel=kernel,
                            gamma=gamma,
                            C=C,
                            verbose=True,
                            max_iters=100)
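For context, a typical fit/predict round trip with a misvm estimator like the one constructed above might look as follows; the bags here are fabricated for illustration:

import numpy as np

# two hypothetical bags of 2-D instances with bag labels in {-1, +1}
bags = [np.random.randn(5, 2), np.random.randn(3, 2) + 2.0]
labels = np.array([-1.0, 1.0])

classifier = misvm.MISVM(kernel='rbf', gamma=gamma, C=C, max_iters=100)
classifier.fit(bags, labels)
predictions = np.sign(classifier.predict(bags))  # predict returns real values; sign gives labels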