def run_train_test(seed_image_id,
                   filedata,
                   max_value_cutoff,
                   test_id,
                   multi_image_training=False):
    """Train the single-coin LMDB model for *seed_image_id* and run its tests.

    Builds the LMDB from *filedata*, creates one test script per id in
    0..test_id (inclusive), runs the training script and the test script for
    the final test_id, then reads the test output and the results filtered
    by *max_value_cutoff*.

    NOTE(review): only the script for the last test_id is executed here even
    though scripts for all earlier ids are created — confirm this is intended.
    """
    create_single_lmdb(seed_image_id, filedata, test_id, multi_image_training)
    # Use a distinct loop variable: the original shadowed the test_id
    # parameter (harmless only because range(0, test_id + 1) ends at
    # test_id, but confusing to read).
    for current_test_id in range(0, test_id + 1):
        create_test_script(seed_image_id, current_test_id, multi_image_training)
    run_script(train_dir + str(seed_image_id) + '/train-single-coin-lmdbs.sh')
    run_script(test_dir + str(test_id) + '/test-' + str(seed_image_id) + '.sh')
    read_test([seed_image_id], test_id)
    # in the metadata dir rm *.png
    image_set.read_results(max_value_cutoff, data_dir, [seed_image_id])
# Example #2 (separator from scraped source; was "Beispiel #2" / "0")
def link_seed_by_graph(seed_id, cut_off, min_connections, max_depth):
    image_set.read_results(cut_off, data_dir, seeds_share_test_images=False, remove_widened_seeds=True)
    image_set.save_widened_seeds(data_dir, seed_id, cut_off)
    image_set.read_results(cut_off, data_dir, seeds_share_test_images=True, remove_widened_seeds=True)
    save_graph()
    most_connected_seeds = image_set.find_most_connected_seeds(data_dir, seed_id, min_connections, max_depth)
    filedata = []
    if len(most_connected_seeds) != 0:
        # image_set.create_composite_image(crop_dir, data_dir, 130, 30, 10, most_connected_seeds.iterkeys())
        for seed_image_id, values in most_connected_seeds.iteritems():
            print values
            filedata.append([seed_image_id, crop_dir + str(seed_image_id) + '.png', values[2]])
    print 'Count of images linked by graph:', len(most_connected_seeds)
    image_set.create_composite_image_from_filedata(crop_dir, data_dir, 140, rows=150, cols=10, filedata=filedata)
    if len(filedata) > 9:
        run_train_test(seed_id, filedata, cut_off, test_id=5, multi_image_training=True)
        run_test(seed_id, cut_off, test_id=5)
        read_all_results(cut_off)
    else:
        print 'Not enough seeds found'
# Example #3 (separator from scraped source; was "Beispiel #3" / "0")
def read_all_results(cut_off=0,
                     seed_image_ids=None,
                     seeds_share_test_images=True,
                     remove_widened_seeds=False):
    """Thin wrapper around image_set.read_results using the module data_dir."""
    reader = image_set.read_results
    reader(cut_off, data_dir, seed_image_ids,
           seeds_share_test_images, remove_widened_seeds)
# Example #4 (separator from scraped source; was "Beispiel #4" / "0")
def run_test(seed_image_id, max_value_cutoff, test_id):
    """Run the test scripts 0..test_id (inclusive) for *seed_image_id*,
    then read the test output and the results below *max_value_cutoff*.
    """
    # Use a distinct loop variable: the original shadowed the test_id
    # parameter (harmless only because the range ends at test_id, so the
    # later read_test call still sees the caller's value).
    for current_test_id in range(0, test_id + 1):
        run_script(test_dir + str(current_test_id) + '/test-' +
                   str(seed_image_id) + '.sh')
    read_test([seed_image_id], test_id)
    image_set.read_results(max_value_cutoff, data_dir, [seed_image_id])
# Example #5 (separator from scraped source; was "Beispiel #5" / "0")
#     seed_images = seed_image_data[int(seed_image_id / 100)]
#     for image_id in seed_images:
#         test_image_id = seed_image_id + image_id
#         filedata.append([test_image_id, crop_dir + str(test_image_id) + '.png', 0])
#     # # the test_id = 5 just adds more data for now:
#     # create_single_lmdb(seed_image_id, filedata, 0, True, 700)
#     # run_script(train_dir + str(seed_image_id) + '/train-single-coin-lmdbs.sh')
#     create_test_script(seed_image_id, 0, True)
#     scripts_to_run.append(test_dir + str(0) + '/test-' + str(seed_image_id) + '.sh')
#     #run_script(test_dir + str(0) + '/test-' + str(seed_image_id) + '.sh')
# run_scripts(scripts_to_run,max_workers=6)
#
# read_test(seed_image_ids, 360)

# image_set.read_results(0, data_dir, seeds_share_test_images=False, bad_coin_ids=bad_coin_ids, ground_truth=ground_truth)
image_set.read_results(0, data_dir, seeds_share_test_images=False)
multi_point_error_test_image_ids = get_multi_point_error_test_image_ids()
print 'The following test_image_ids where taking out of the image:'
print multi_point_error_test_image_ids
print 'multi_point_error_test_image_ids length:' + str(
    len(multi_point_error_test_image_ids))
image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10, None,
                                  multi_point_error_test_image_ids, True)
#image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10, None, multi_point_error_test_image_ids, True)
#image_set.create_composite_image(crop_dir, data_dir, 140, 100, 10, multi_point_error_test_image_ids)

print 'Done in %s seconds' % (time.time() - start_time, )
sys.exit("End")

# ********
# Step 2:
# Example #6 (separator from scraped source; was "Beispiel #6" / "0")
def run_multi_point():
    """Multi-point pipeline driver (largely a commented-out experiment log).

    Active steps: build seed/test image id lists from even coin ids, pickle
    the test ids, compute test batch ids, then run the date-composite
    reporting at the bottom.  Most intermediate steps (LMDB creation, script
    execution, seed widening) are retained only as commented-out history.

    NOTE(review): this function may continue past the end of this file
    chunk; documentation covers the visible portion only.
    """
    init_dir()
    start_time = time.time()
    test_image_ids = []
    new_test_image_ids = []
    new_seed_image_ids = []
    count = 0

    seed_image_ids = pickle.load(open(data_dir + 'seed_image_ids_all.pickle', "rb"))
    # Keep even coin ids only; each selected coin contributes two seed ids
    # (coin_id*100 and (coin_id+3)*100) and 57 test image ids per seed.
    # NOTE(review): the count < 99999999 guard is effectively always true.
    for coin_id in seed_image_ids:
        if (count < 99999999) and (coin_id % 2 == 0):
            new_seed_image_ids.append(coin_id * 100)
            new_seed_image_ids.append((coin_id +3) * 100)
            for image_id in range(0,57):
                new_test_image_ids.append(coin_id * 100 + image_id)
                new_test_image_ids.append((coin_id +3) * 100 + image_id)
            count += 2

    test_image_ids = sorted(new_test_image_ids)


    #seed_image_ids = sorted(new_seed_image_ids)
    #pickle.dump(seed_image_ids, open(data_dir + 'seed_image_ids.pickle', "wb"))
    pickle.dump(test_image_ids, open(data_dir + 'test_image_ids.pickle', "wb"))
    #save_multi_point_ids()

    seed_image_data = pickle.load(open(data_dir + 'multi_point_ids.pickle', "rb"))
    # Hard-coded pair of seed ids overrides the list built above;
    # seed_image_data, scripts_to_run and images_per_angle are unused in
    # the active (non-commented) code below.
    seed_image_ids = (200,1100)
    scripts_to_run = []
    images_per_angle = 200
    #seed_image_ids = pickle.load(open(data_dir + 'seed_image_ids.pickle', "rb"))
    #test_image_ids = pickle.load(open(data_dir + 'seed_image_ids.pickle', "rb"))

    #create_test_lmdb_batches(test_image_ids,seed_image_ids,1)
    # Batch id is the thousands bucket of each test image id
    # (assumes Python 2 integer division — TODO confirm interpreter).
    test_batch_ids = []
    for test_image_id in test_image_ids:
        test_batch_id = test_image_id / 1000
        if test_batch_id not in test_batch_ids:
            test_batch_ids.append(test_batch_id)

    # Per-seed training/testing is currently disabled: the loop body is a
    # no-op with the original steps kept as commented-out history.
    for seed_image_id in seed_image_ids:
        pass
        # filedata = []
        # seed_images = seed_image_data[int(seed_image_id / 100)]
        # for image_id in seed_images:
        #     test_image_id = seed_image_id + image_id
        #     filename = get_filename_from(test_image_id)
        #     filedata.append([test_image_id, filename, 0])
        #create_single_lmdb(seed_image_id, filedata, 0, True, images_per_angle)
        #run_script(train_dir + str(seed_image_id) + '/train-single-coin-lmdbs.sh')
    #     for test_batch_id in test_batch_ids:
    #         filename = test_dir + str(test_batch_id) + '/' + str(seed_image_id) + '.dat'
    #         if os.path.isfile(filename):
    #             file_size = os.path.getsize(filename)
    #             if file_size > 0:
    #                 print 'Exists:', filename
    #                 continue
    #         create_test_script(seed_image_id,test_batch_id,True)
    #         scripts_to_run.append(test_dir + str(test_batch_id) + '/test-' + str(seed_image_id) + '.sh')
    #
    # run_scripts(scripts_to_run,max_workers=6)
    #read_test(test_batch_ids,seed_image_ids)

    # ********
    # Step 2:
    # Then widen the seed to include all crops in all results for each seed:
    # Check out the results in the png
    # Note the cutoff
    # This should be changed to include the step 3 double check

    # for seed_image_id in widen_seed_image_ids:
    #     cutoff = 13
    #     filedata = get_single_lmdb_multi_point_filedata(seed_image_id, cutoff, multi_point_error_test_image_ids)
    #     create_single_lmdb(seed_image_id, filedata, 0, True, 2800, retraining=True)
    #     run_script(train_dir + str(seed_image_id) + '/train-single-coin-lmdbs.sh')
    #     run_script(test_dir + str(0) + '/test-' + str(seed_image_id) + '.sh')
    # read_test(seed_image_ids, 360)

    # # image_set.read_results(0, data_dir, seeds_share_test_images=False, bad_coin_ids=bad_coin_ids, ground_truth=ground_truth)
    # image_set.read_results(0, data_dir, seeds_share_test_images=False)
    # multi_point_error_test_image_ids = get_multi_point_error_test_image_ids()
    # print 'The following test_image_ids where taking out of the image:'
    # print multi_point_error_test_image_ids
    # print 'multi_point_error_test_image_ids length:' + str(len(multi_point_error_test_image_ids))
    # image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10, None, multi_point_error_test_image_ids, True)
    # #image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10)
    #Dates  ************************************************************************************
    # filename = data_dir + 'good_coin_ids.pickle'
    # if os.path.exists(filename):
    #     good_coin_ids = set(pickle.load(open(filename, "rb")))
    # image_set.read_results(0, data_dir, seeds_share_test_images=False,remove_coin_ids=good_coin_ids)

    # Active reporting: read all results, derive per-coin errors/angles,
    # and build the date composite image.
    image_set.read_results(0, data_dir, seeds_share_test_images=False)
    multi_point_error_test_image_ids, coin_angles,total_coin_results = get_errors_and_angles(False)
    print total_coin_results

    # Create a composite image for dates:
    #save_good_test_ids is not correct this needs a database:
    #image_set.save_good_test_ids(data_dir, 1100,0,multi_point_error_test_image_ids)#image_set.save_good_test_ids(data_dir, 200,31.4,multi_point_error_test_image_ids)

    #image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10, None, multi_point_error_test_image_ids, True)
    #image_set.create_composite_images(crop_dir, data_dir, 125, 40, 10, None,[], True)
    ground_truth_designs = get_ground_truth_designs(total_coin_results,for_dates=False)
    #image_set.create_composite_image_ground_truth_designs(crop_dir, data_dir, 125, 50, 10,coin_angles,ground_truth_designs,False,True,True)
    image_set.create_date_composite_image(crop_dir, data_dir, 1100, 200000, coin_angles,ground_truth_designs,False,True)