Example #1
def main():
    # argparse is stdlib; cfg, cfg_from_file, imdb, test_run and save_dets
    # are project-level helpers from the surrounding codebase.
    parser = argparse.ArgumentParser()
    parser.add_argument('outfile', help='detection file output')
    parser.add_argument('-c', '--config', default='conf.yaml')
    parser.add_argument('-m', '--model', default=None)
    parser.add_argument('-s', '--imdb', default=None)
    args, unparsed = parser.parse_known_args()

    cfg_from_file(args.config)

    # CLI flags override values loaded from the YAML config.
    if args.model is not None:
        cfg.test_model = args.model

    if args.imdb is not None:
        cfg.test.imdb = args.imdb

    test_imdb = imdb.get_imdb(cfg.test.imdb, is_training=False)
    dets = test_run(test_imdb)
    save_dets(test_imdb, dets, args.outfile)
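For context, cfg_from_file and the global cfg come from the project's configuration module; the usual pattern in detection codebases is a YAML file merged into an attribute-style dict. A minimal sketch of that pattern, assuming PyYAML (the AttrDict class and keys below are hypothetical, not the project's actual code):

import yaml

class AttrDict(dict):
    # Dict whose keys also read as attributes, e.g. cfg.test.imdb.
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

# Hypothetical defaults mirroring the keys used in the example above.
cfg = AttrDict(test=AttrDict(imdb=''), test_model='')

def cfg_from_file(path):
    # Merge a YAML file into the global cfg, one nesting level deep.
    with open(path) as f:
        loaded = yaml.safe_load(f) or {}
    for key, value in loaded.items():
        if isinstance(value, dict):
            cfg[key].update(value)
        else:
            cfg[key] = value

The script itself would then be invoked along the lines of python test_net.py dets.pkl -c conf.yaml -m model.ckpt (file names hypothetical).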
Example #2
def main():
    # Same entry point as Example #1, extended with TSV debug dumps.
    parser = argparse.ArgumentParser()
    parser.add_argument('outfile', help='detection file output')
    parser.add_argument('-c', '--config', default='conf.yaml')
    parser.add_argument('-m', '--model', default=None)
    parser.add_argument('-s', '--imdb', default=None)
    args, unparsed = parser.parse_known_args()

    cfg_from_file(args.config)

    if args.model is not None:
        cfg.test_model = args.model

    if args.imdb is not None:
        cfg.test.imdb = args.imdb

    print(cfg.test.imdb)  # debug: show which imdb is being evaluated
    test_imdb = imdb.get_imdb(cfg.test.imdb, is_training=False)
    # Debug aid: dump pre-NMS boxes and ground truth for inspection.
    from x_common import save_imdb_to_tsv
    save_imdb_to_tsv(test_imdb, 'output/pre_nms.tsv', 'output/gt.tsv')
    dets = test_run(test_imdb)
    # Write detections as TSV instead of the default save_dets format.
    save_dets_to_tsv(test_imdb, dets, args.outfile)
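save_imdb_to_tsv and save_dets_to_tsv live in the project's x_common module, so their column layout isn't visible in this listing. A rough sketch of what such a TSV writer could look like (the detection structure and columns are assumptions for illustration only):

import csv

def save_dets_to_tsv(imdb, dets, outfile):
    # Hypothetical layout: one tab-separated row per detection box.
    # imdb is unused in this sketch but kept for signature parity.
    with open(outfile, 'w') as f:
        writer = csv.writer(f, delimiter='\t')
        writer.writerow(['image', 'class', 'score', 'x1', 'y1', 'x2', 'y2'])
        for image_id, boxes in dets.items():
            for cls, score, x1, y1, x2, y2 in boxes:
                writer.writerow([image_id, cls, score, x1, y1, x2, y2])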
Example #3
def train(resume, visualize):
    # Seed numpy so the data shuffling order is reproducible.
    np.random.seed(cfg.random_seed)
    dataset, train_imdb = get_dataset()
    do_val = len(cfg.train.val_imdb) > 0

    class_weights = class_equal_weights(train_imdb)
    (preloaded_batch, enqueue_op, enqueue_placeholders,
     q_size) = setup_preloading(
            Gnet.get_batch_spec(train_imdb['num_classes']))
    reg = tf.contrib.layers.l2_regularizer(cfg.train.weight_decay)
    net = Gnet(num_classes=train_imdb['num_classes'], batch=preloaded_batch,
               weight_reg=reg, class_weights=class_weights)
    lr_gen = LearningRate()
    # get_total_loss() folds the REGULARIZATION_LOSSES collection into the
    # data loss, replacing the manual sum sketched below:
    # reg_ops = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # reg_op = tf.reduce_mean(reg_ops)
    # optimized_loss = net.loss + reg_op
    optimized_loss = tf.contrib.losses.get_total_loss()
    learning_rate, train_op = get_optimizer(
        optimized_loss, net.trainable_variables)

    val_net = val_imdb = None
    if do_val:
        val_imdb = imdb.get_imdb(cfg.train.val_imdb, is_training=False)
        val_net = Gnet(num_classes=val_imdb['num_classes'], reuse=True)

    with tf.name_scope('summaries'):
        tf.summary.scalar('loss', optimized_loss)
        tf.summary.scalar('data_loss', net.loss)
        tf.summary.scalar('data_loss_normed', net.loss_normed)
        tf.summary.scalar('data_loss_unnormed', net.loss_unnormed)
        tf.summary.scalar('lr', learning_rate)
        tf.summary.scalar('q_size', q_size)
        if cfg.train.histograms:
            tf.summary.histogram('roi_feats', net.roifeats)
            tf.summary.histogram('det_imfeats', net.det_imfeats)
            tf.summary.histogram('pw_feats', net.pw_feats)
            for i, blockout in enumerate(net.block_feats):
                tf.summary.histogram('block{:02d}'.format(i + 1),
                                     blockout)
        merge_summaries_op = tf.summary.merge_all()

    with tf.name_scope('averaging'):
        ema = tf.train.ExponentialMovingAverage(decay=0.7)
        maintain_averages_op = ema.apply(
            [net.loss_normed, net.loss_unnormed, optimized_loss])
        # update moving averages after every loss evaluation
        with tf.control_dependencies([train_op]):
            train_op = tf.group(maintain_averages_op)
        smoothed_loss_normed = ema.average(net.loss_normed)
        smoothed_loss_unnormed = ema.average(net.loss_unnormed)
        smoothed_optimized_loss = ema.average(optimized_loss)

    restorer = ckpt = None
    if resume:
        ckpt = tf.train.get_checkpoint_state('./')
        restorer = tf.train.Saver()
    elif cfg.gnet.imfeats:
        # Restore pretrained resnet_v1 weights, excluding optimizer slot
        # variables (Adam/Momentum accumulators).
        variables_to_restore = slim.get_variables_to_restore(
            include=["resnet_v1"])
        variables_to_exclude = \
            slim.get_variables_by_suffix('Adam_1', scope='resnet_v1') + \
            slim.get_variables_by_suffix('Adam', scope='resnet_v1') + \
            slim.get_variables_by_suffix('Momentum', scope='resnet_v1')
        restorer = tf.train.Saver(
            list(set(variables_to_restore) - set(variables_to_exclude)))

    saver = tf.train.Saver(max_to_keep=None)
    model_manager = ModelManager()
    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(cfg.log_dir, sess.graph)
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        coord = start_preloading(
            sess, enqueue_op, dataset, enqueue_placeholders)

        start_iter = 1
        if resume:
            restorer.restore(sess, ckpt.model_checkpoint_path)
            # Resume the iteration counter from the restored global_step.
            tensor = tf.get_default_graph().get_tensor_by_name("global_step:0")
            start_iter = sess.run(tensor + 1)
        elif cfg.gnet.imfeats:
            restorer.restore(sess, cfg.train.pretrained_model)

        for it in range(start_iter, cfg.train.num_iter + 1):
            if coord.should_stop():
                break

            if visualize:
                # don't do actual training, just visualize data
                visualize_detections(sess, it, learning_rate, lr_gen, net,
                                     train_op)
                continue

            (_, val_total_loss, val_loss_normed, val_loss_unnormed,
             summary) = sess.run(
                [train_op, smoothed_optimized_loss, smoothed_loss_normed,
                 smoothed_loss_unnormed, merge_summaries_op],
                feed_dict={learning_rate: lr_gen.get_lr(it)})
            train_writer.add_summary(summary, it)

            if it % cfg.train.display_iter == 0:
                print(('{}  iter {:6d}   lr {:8g}   opt loss {:8g}     '
                       'data loss normalized {:8g}   '
                       'unnormalized {:8g}').format(
                    datetime.now(), it, lr_gen.get_lr(it), val_total_loss,
                    val_loss_normed, val_loss_unnormed))

            if do_val and it % cfg.train.val_iter == 0:
                print('{}  starting validation'.format(datetime.now()))
                val_map, mc_ap, pc_ap = val_run(sess, val_net, val_imdb)
                print(('{}  iter {:6d}   validation pass:   mAP {:5.1f}   '
                       'multiclass AP {:5.1f}').format(
                      datetime.now(), it, val_map, mc_ap))

                # Checkpoint saving and model bookkeeping are disabled in this variant:
                #save_path = saver.save(sess, net.name, global_step=it)
                #print('wrote model to {}'.format(save_path))
                # dump_debug_info(sess, net, it)
                #model_manager.add(it, val_map, save_path)
                #model_manager.print_summary()
                #model_manager.write_link_to_best('./gnet_best')

            #elif it % cfg.train.save_iter == 0 or it == cfg.train.num_iter:
            #    save_path = saver.save(sess, net.name, global_step=it)
            #    print('wrote model to {}'.format(save_path))
            #    # dump_debug_info(sess, net, it)

        coord.request_stop()
        coord.join()
    print('training finished')
    if do_val:
        print('summary of validation performance')
        model_manager.print_summary()
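One detail worth pulling out of the averaging block in train(): ema.apply() creates shadow variables for the listed loss tensors, and grouping it behind train_op with control_dependencies makes a single sess.run(train_op) trigger both the optimizer step and the shadow update. A stripped-down sketch of the same TF 1.x pattern (the loss variable and optimizer step are stand-ins):

import tensorflow as tf  # TF 1.x graph-mode API

loss = tf.Variable(0.0, name='loss')    # stand-in for the network's loss
optimizer_step = loss.assign_add(1.0)   # stand-in for the real optimizer op

ema = tf.train.ExponentialMovingAverage(decay=0.7)
maintain_averages_op = ema.apply([loss])  # creates the shadow variable
smoothed_loss = ema.average(loss)         # reads the shadow variable

# Running train_op now triggers the optimizer step and the EMA update together.
with tf.control_dependencies([optimizer_step]):
    train_op = tf.group(maintain_averages_op)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)
    print(sess.run(smoothed_loss))  # lags behind loss because decay=0.7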
Example #4
def get_dataset():
    train_imdb = imdb.get_imdb(cfg.train.imdb, is_training=True)
    # Image features are needed if the net computes them or loads them precomputed.
    need_imfeats = cfg.gnet.imfeats or cfg.gnet.load_imfeats
    return ShuffledDataset(train_imdb, 1, need_imfeats), train_imdb
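ShuffledDataset is not shown in this listing; from the call site it wraps the training imdb and yields batches (of size 1 here) in a reshuffled order, optionally carrying image features. A hypothetical sketch of such a wrapper (the class body, including the 'roidb' key, is an assumption rather than the project's code):

import numpy as np

class ShuffledDataset(object):
    # Yields imdb entries in a freshly shuffled order, epoch after epoch.
    def __init__(self, imdb, batch_size, need_imfeats):
        self.imdb = imdb
        self.batch_size = batch_size
        self.need_imfeats = need_imfeats  # whether batches also carry image features

    def __iter__(self):
        while True:  # endless stream; the training loop decides when to stop
            order = np.random.permutation(len(self.imdb['roidb']))
            for start in range(0, len(order), self.batch_size):
                idx = order[start:start + self.batch_size]
                yield [self.imdb['roidb'][i] for i in idx]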
Example #5
def main():
    if len(sys.argv) < 3:
        print "Usage: <netflix e-mail> <netflix password> ==> Print Queue and Ratings"
        exit(0)
    email = sys.argv[1]
    password = sys.argv[2]

    # Initialize the movie database for storing movies and the webdriver
    # for opening the page and scraping.
    mdb = MovieDatabase()
    movies = []
    driver = webdriver.Firefox()

    driver.get("https://www.netflix.com/Login")

    # Login procedure
    e_email = driver.find_element_by_name("email")
    e_button_submit = driver.find_element_by_class_name("login-button")

    e_email.send_keys(email)
    e_remember = driver.find_element_by_name("rememberMe")
    e_remember.click()
    e_button_submit.click()

    e_password = driver.find_element_by_name("password")
    e_password.send_keys(password)

    e_button_submit = driver.find_element_by_class_name("login-button")
    print "[+] Signing in..."
    e_button_submit.click()

    load_time(4)

    # Click the right profile (index 2 is hard-coded for this account).
    profiles = driver.find_elements_by_class_name("profile-icon")
    profiles[2].click()
    load_time(2)

    driver.get("http://www.netflix.com/browse/my-list")
    print "[+] Waiting for movie data...\n\n"
    load_time(15)

    # Find movies by tag in my-list.
    temp = driver.find_elements_by_class_name("video-artwork")
    print "Titles found: " + str(len(temp))

    for i in temp:
        print i

    temp = driver.find_elements_by_tag_name("a")
    years = driver.find_elements_by_class_name("year")
    years = [i.text for i in years]

    if len(temp) > 0:
        print "[+] Movie tags found..."

    titles = []
    print "[+] Waiting to clean and print..."

    for i in temp:
        if i.get_attribute("type") == "title":
            titles.append(i.text)

    driver.close()

    # Iterate through the list of movie titles in the Netflix list, get
    # information about them from the API, and add them to the database.
    for i in range(len(titles)):
        t = titles[i].encode('utf-8', 'replace')
        y = years[i].encode('utf-8', 'replace')
        rating = get_imdb(t, year=y)
        m = Movie(t, y, rating)
        mdb.db_update_movie(m)
        mdb.commit()
        movies.append(m)
        m.print_movie()

    print "[+] %d movies found in instant queue" % len(movies)
    #for m in movies:
    #    m.print_movie()
    #print "dumping titles list"
    #print titles
    del mdb
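This script targets Python 2 and the old Selenium API; the find_element_by_* helpers it relies on were removed in Selenium 4. The modern equivalent of the login lookups, with the selector names carried over from the example, would be:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.get("https://www.netflix.com/Login")

# Selenium 4 replaces find_element_by_name/_class_name with By locators.
e_email = driver.find_element(By.NAME, "email")
e_button_submit = driver.find_element(By.CLASS_NAME, "login-button")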