Example #1
def create_UV_XYZ_dictionary(root_dir):
    '''
    classes = ['ape', 'benchviseblue', 'can', 'cat', 'driller', 'duck', 'glue', 'holepuncher',
               'iron', 'lamp', 'phone', 'cam', 'eggbox']
    '''
    classes = ['iron']
    # create a dictionary for UV to XYZ correspondence
    for label in classes:
        ptcld_file = root_dir + label + "/object.xyz"
        pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))
        # calculate u and v coordinates from the xyz point cloud file
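        # Each point is projected onto the unit sphere around the cloud's
        # centroid and unwrapped with the equirectangular mapping:
        #   u = 0.5 + atan2(z, x) / (2*pi),   v = 0.5 - asin(y) / pi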
        centre = np.mean(pt_cld_data, axis=0)
        length = np.sqrt((centre[0] - pt_cld_data[:, 0])**2 +
                         (centre[1] - pt_cld_data[:, 1])**2 +
                         (centre[2] - pt_cld_data[:, 2])**2)
        unit_vector = [(pt_cld_data[:, 0] - centre[0]) / length,
                       (pt_cld_data[:, 1] - centre[1]) / length,
                       (pt_cld_data[:, 2] - centre[2]) / length]
        u_coord = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0]) /
                         (2 * np.pi))
        v_coord = 0.5 - (np.arcsin(unit_vector[1]) / np.pi)
        u_coord = (u_coord * 255).astype(int)
        v_coord = (v_coord * 255).astype(int)
        # save the mapping as a pickle file, keeping the first XYZ point
        # seen for each discrete (u, v) texel
        dct = {}
        for u, v, xyz in zip(u_coord, v_coord, pt_cld_data):
            key = (u, v)
            if key not in dct:
                dct[key] = xyz
        save_obj(dct, root_dir + label + "/UV-XYZ_mapping")
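
Every example on this page relies on a pair of pickle helpers named
save_obj/load_obj. Their exact implementation varies by project, but a
minimal sketch consistent with the usage above (Example #4 strips a '.pkl'
suffix before calling load_obj, so the extension is presumably appended
inside the helpers) looks like this:

import pickle

def save_obj(obj, name):
    # Serialize obj to '<name>.pkl'.
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

def load_obj(name):
    # Load a previously pickled object from '<name>.pkl'.
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)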
Example #2
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")

# Import the pickle and scraping helpers

from helper import (load_obj, save_obj, init_driver, searchJobs,
                    text_cleaner, get_pause, string_from_text)

# 1 - Load the existing dictionary and link list; if they do not exist
#     yet, initialize them as empty.

try:
    jobDict = load_obj('glassDoorDict')
    link = load_obj('glassDoorlink')
except Exception:
    save_obj([], 'glassDoorlink')
    save_obj({}, 'glassDoorDict')

    jobDict = load_obj('glassDoorDict')
    link = load_obj('glassDoorlink')

print('len(jobDict) = ' + str(len(jobDict)) + ', len(link) = ' + str(len(link)))

# 2 - Choose what you want to do:
#     get_link => scrape links and brief data,
#     get_data => scrape detailed data.


get_link = True
# get_link = False
Example #3
import sqlite3
from collections import defaultdict
from html import unescape  # Python 3; older code may have used HTMLParser

from bs4 import BeautifulSoup

from helper import save_obj

main = sqlite3.connect('main.db')


# Functions
def xml_decoded(data_string):
    # Strip <quote> blocks, then return the unescaped plain text.
    soup = BeautifulSoup(data_string, 'html.parser')
    for x in soup.findAll('quote'):
        x.extract()
    return unescape(soup.get_text())
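
# e.g. xml_decoded(u'<quote>quoted</quote>hi &amp; bye') -> u'hi & bye'
# (hypothetical input; Skype stores message bodies as XML fragments)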


results = defaultdict(list)
prev_author = ''
current_message_chain = ''

for data in main.execute(
        u"SELECT author, body_xml FROM Messages WHERE convo_id=1108 AND body_xml IS NOT NULL AND author IS NOT ''"
):
    current_author = data[0]
    if current_author == prev_author:
        # Same author: continue the message chain
        current_message_chain += u' ' + xml_decoded(data[1])
    else:
        # New author: deposit the previous chain (skip the initial empty one)
        if prev_author:
            results[prev_author].append(current_message_chain)
        # ... and start a new chain
        current_message_chain = xml_decoded(data[1])
    prev_author = current_author

# Deposit the final author's chain once the rows run out
if prev_author:
    results[prev_author].append(current_message_chain)

save_obj(results, 'skype_messages')
print('Skype message file updated!')
Example #4
    src_data = get_data('source_space')
    tgt_data = get_data('target_space')
    ####################################

    ####################################
    # start iterating over the files
    for filename in os.listdir(path_gen):
        if filename.endswith('.pkl'):
            name = filename.replace('.pkl', '')
            epoch = int(name.split('_')[1])
            temp = float(name.split('_')[2])

            data = hp.load_obj(f'{path_gen}{name}')
            novo = data['novo_tr']
            novo = get_n_random(novo, N_fcd, name)

            f_dist_src = get_frechet_dist(src_data, novo)
            f_dist_tgt = get_frechet_dist(tgt_data, novo)

            frechet_dist = {'f_dist_src': f_dist_src, 'f_dist_tgt': f_dist_tgt}

            hp.save_obj(frechet_dist, f'{save_path}frechet_{epoch}_{temp}')
            if verbose:
                print(
                    f'e: {epoch}, t: {temp}, FCD to src: {f_dist_src}, FCD to tgt: {f_dist_tgt}'
                )

    end = time.time()
    if verbose: print(f'FRECHET DISTANCE DONE in {end - start:.04} seconds')
    ####################################
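
get_frechet_dist is not shown in this fragment. For generative-model
evaluation it is usually the Fréchet distance between two Gaussians fitted
to feature activations (the FID/FCD formula); a minimal sketch under that
assumption:

import numpy as np
from scipy import linalg

def frechet_distance(act1, act2):
    # act1, act2: arrays of shape (n_samples, n_features)
    mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
    diff = mu1 - mu2
    covmean = linalg.sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # drop numerical-noise imaginary parts
    return diff.dot(diff) + np.trace(sigma1 + sigma2 - 2.0 * covmean)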
Example #5
File: train.py  Project: zhoushiqi47/DPOD
    for file in files:
        if file.endswith(".jpg"):  # images that exist
            list_all_images.append(os.path.join(root, file))

num_images = len(list_all_images)
indices = list(range(num_images))
np.random.seed(69)
np.random.shuffle(indices)
split = int(np.floor(args.split * num_images))
train_idx, test_idx = indices[:split], indices[split:]

print("Total number of images: ", num_images)
print(" Total number of training images: ", len(train_idx))
print(" Total number of testing images: ", len(test_idx))

save_obj(list_all_images, root_dir + "all_images_adr")
save_obj(train_idx, root_dir + "train_images_indices")
save_obj(test_idx, root_dir + "test_images_indices")
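
A later stage can restore the split with the matching loader, for example
(assuming the pickle helpers sketched under Example #1):

list_all_images = load_obj(root_dir + "all_images_adr")
train_idx = load_obj(root_dir + "train_images_indices")
train_images = [list_all_images[i] for i in train_idx]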

dataset_dir_structure(root_dir)

# Intrinsic Parameters of the Camera
fx = 572.41140
px = 325.26110
fy = 573.57043
py = 242.04899
intrinsic_matrix = np.array([[fx, 0, px], [0, fy, py], [0, 0, 1]])
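
As a quick sanity check, these pinhole intrinsics map a camera-frame point
(X, Y, Z) to the pixel (u, v) = (fx*X/Z + px, fy*Y/Z + py); the point below
is hypothetical:

point = np.array([0.1, -0.05, 1.0])      # (X, Y, Z) in camera coordinates
uvw = intrinsic_matrix @ point           # homogeneous pixel coordinates
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]  # perspective divide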

classes = {
    'ape': 1,
    'benchviseblue': 2,
Example #6
num_images = len(list_all_images)
indices = list(range(num_images))
np.random.seed(args.randomseed)
np.random.shuffle(indices)
split = int(np.floor(float(args.split) * num_images))
train_idx, test_idx = indices[:split], indices[split:]

print("Total number of images: ", num_images)
print(" Total number of training images: ", len(train_idx))
print(" Total number of testing images: ", len(test_idx))

# Save the test/train split to the unique training dir
if not os.path.exists(train_eval_dir):
    os.makedirs(train_eval_dir)
save_obj(train_idx, os.path.join(train_eval_dir, "train_images_indices"))
save_obj(test_idx, os.path.join(train_eval_dir, "test_images_indices"))
# Create test/eval dir structure
test_dir_structure(train_eval_dir, classes)

# This only needs to be done once per dataset: it generates ALL the GT data,
# not just what the current train/val split needs. That takes longer up
# front but speeds up later development.
if not gt_exists:
    # Save all images list to the root dir
    save_obj(list_all_images, os.path.join(root_dir, "all_images_adr"))

    # Create GT dir structure
    ground_truth_dir_structure(root_dir, classes)

    # Intrinsic Parameters of the LineMOD Dataset Camera
Example #7
#----------------------

gamma = 1
num_actions = 4
num_states = shape[0] * shape[1]
human_policy = init_nonrandom_policy()  #init_random_policy(25, 4)
human_performance = np.mean(evaluate_policy(env, human_policy))
print("human performance: ", human_performance)
saved_model = True

if not saved_model:
    learning_rate = 0.005
    untrained_agent = QLearning(num_actions, gamma, learning_rate)
    trained_agent = optimal_agent(env, untrained_agent, num_episodes=10000)
    q_star = trained_agent.q_table
    save_obj(dict(q_star), "q_star")
else:
    q_star = load_obj("q_star")
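
# Optimal expected return from the start state; used together with the
# human baseline to set the performance floor below.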
J_star = np.max(q_star[startState])
baseline = human_performance

shared = []
nq_cost = []
min_cost = []
mod_shared = []

alphas = np.concatenate((np.arange(0, 1e-3, 1e-4), np.arange(1e-3, 1e-2, 1e-3),
                         np.arange(1e-2, 0.1, 1e-2), np.arange(0.1, 1.01, 0.1)))
for alpha in tqdm(alphas):
    min_performance = baseline + alpha * (J_star - baseline)
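
The QLearning class is not shown in this fragment. The dict(q_star)
conversion before pickling suggests q_table is a defaultdict; a minimal
tabular version whose constructor matches the call above (everything beyond
the signature is an assumption) could look like:

import numpy as np
from collections import defaultdict

class QLearning:
    def __init__(self, num_actions, gamma, learning_rate):
        self.num_actions = num_actions
        self.gamma = gamma
        self.lr = learning_rate
        # q_table maps state -> array of per-action values
        self.q_table = defaultdict(lambda: np.zeros(num_actions))

    def update(self, state, action, reward, next_state):
        # Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
        td_target = reward + self.gamma * np.max(self.q_table[next_state])
        self.q_table[state][action] += self.lr * (td_target - self.q_table[state][action])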
main = sqlite3.connect("main.db")


# Functions
def xml_decoded(data_string):
    soup = BeautifulSoup(data_string)
    [x.extract() for x in soup.findAll("quote")]
    return unescape(soup.get_text())


results = defaultdict(list)
prev_author = ""
current_message_chain = ""

for data in main.execute(
    u"SELECT author, body_xml FROM Messages WHERE convo_id=1108 AND body_xml IS NOT NULL AND author IS NOT ''"
):
    current_author = data[0]
    if current_author == prev_author:
        # Continue message chain
        current_message_chain += u" " + xml_decoded(data[1])
    elif current_author != prev_author:
        # Deposit old message chain
        results[prev_author].append(current_message_chain)
        # Start a new one
        current_message_chain = xml_decoded(data[1])
    prev_author = current_author

save_obj(results, "skype_messages")
print("Skype message file updated!")