Example 1
import sys
import re
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")

# import the helper functions

from helper import load_obj, save_obj, init_driver, searchJobs, text_cleaner, get_pause, \
string_from_text

# 1- Load existing dictionary. Check for initial dictionary.
# If empty initialize

try:
	jobDict = load_obj('glassDoorDict')
	link =    load_obj('glassDoorlink')
except (FileNotFoundError, EOFError):  # first run: create empty stores
	save_obj([], 'glassDoorlink')
	save_obj({}, 'glassDoorDict')

	jobDict = load_obj('glassDoorDict')
	link =    load_obj('glassDoorlink')

print('len(jobDict) = '+str(len(jobDict))+ ', len(link) = '+str(len(link)))

# 2- Choose what you want to do:
#    get_link => scrape job links and brief data,
#    get_data => scrape detailed data.
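
For reference, a minimal sketch of the pickle-backed save_obj/load_obj helpers that these examples appear to assume (the real helper module may differ):

import pickle

def save_obj(obj, name):
    # assumption: objects are stored as '<name>.pkl'
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

def load_obj(name):
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)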

Example 2
        return data

    src_data = get_data('source_space')
    tgt_data = get_data('target_space')
    ####################################

    ####################################
    # start iterating over the files
    for filename in os.listdir(path_gen):
        if filename.endswith('.pkl'):
            name = filename.replace('.pkl', '')
            epoch = int(name.split('_')[1])
            temp = float(name.split('_')[2])

            data = hp.load_obj(f'{path_gen}{name}')
            novo = data['novo_tr']
            novo = get_n_random(novo, N_fcd, name)

            f_dist_src = get_frechet_dist(src_data, novo)
            f_dist_tgt = get_frechet_dist(tgt_data, novo)

            frechet_dist = {'f_dist_src': f_dist_src, 'f_dist_tgt': f_dist_tgt}

            hp.save_obj(frechet_dist, f'{save_path}frechet_{epoch}_{temp}')
            if verbose:
                print(
                    f'e: {epoch}, t: {temp}, FCD to src: {f_dist_src}, FCD to tgt: {f_dist_tgt}'
                )

    end = time.time()
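
A hypothetical illustration of the filename convention the loop above assumes, '<tag>_<epoch>_<temp>.pkl' (the concrete tag is an assumption):

name = 'novo_40_0.8'              # from a file named 'novo_40_0.8.pkl'
epoch = int(name.split('_')[1])   # -> 40
temp = float(name.split('_')[2])  # -> 0.8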
Example 3
                                 out_channels_uv=256,
                                 bilinear=True)
# load the best weights from the training loop
correspondence_block.load_state_dict(
    torch.load('correspondence_block.pt', map_location=torch.device('cpu')))
pose_refiner = Pose_Refiner()
# load the best weights from the training loop
pose_refiner.load_state_dict(
    torch.load('pose_refiner.pt', map_location=torch.device('cpu')))

correspondence_block.cuda()
pose_refiner.cuda()
pose_refiner.eval()
correspondence_block.eval()

list_all_images = load_obj(root_dir + "all_images_adr")
testing_images_idx = load_obj(root_dir + "test_images_indices")

regex = re.compile(r'\d+')
upsampled = nn.Upsample(size=[240, 320], mode='bilinear', align_corners=False)
total_score = 0
for i in range(len(testing_images_idx)):

    img_adr = list_all_images[testing_images_idx[i]]
    label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]
    idx = regex.findall(os.path.split(img_adr)[1])[0]

    tra_adr = root_dir + label + "/data/tra" + str(idx) + ".tra"
    rot_adr = root_dir + label + "/data/rot" + str(idx) + ".rot"
    true_pose = get_rot_tra(rot_adr, tra_adr)
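
get_rot_tra is not shown here; a plausible sketch, assuming each .rot/.tra file stores a one-line header followed by the matrix (mirroring the np.loadtxt(..., skiprows=1) call used elsewhere for .xyz files), with the 3x4 [R|t] layout matching the np.append(rot, tvecs, axis=1) seen in Example 5:

import numpy as np

def get_rot_tra(rot_adr, tra_adr):
    rot = np.loadtxt(rot_adr, skiprows=1)              # 3x3 rotation
    tra = np.loadtxt(tra_adr, skiprows=1)              # 3x1 translation
    return np.append(rot, tra.reshape(3, 1), axis=1)   # 3x4 [R|t] pose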
Example 4
File: eval.py Project: aja9675/DPOD
    torch.load(correspondence_block_filename,
               map_location=torch.device('cpu')))
pose_refiner = Pose_Refiner()

print("Loading pose_refiner block")
# load the best weights from the training loop
pose_refiner.load_state_dict(
    torch.load('pose_refiner.pt', map_location=torch.device('cpu')))

correspondence_block.cuda()
pose_refiner.cuda()
pose_refiner.eval()
correspondence_block.eval()

print("Listing all images")
list_all_images = load_obj(os.path.join(root_dir, "all_images_adr"))
testing_images_idx = load_obj(
    os.path.join(train_eval_dir, "test_images_indices"))

regex = re.compile(r'\d+')
upsampled = nn.Upsample(size=[240, 320], mode='bilinear', align_corners=False)
total_score = 0
print("For all %i test images..." % len(testing_images_idx))
for i in range(len(testing_images_idx)):
    if i % 100 == 0:
        print("\t %i / %i" % (i, len(testing_images_idx)))

    img_adr = list_all_images[testing_images_idx[i]]
    label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]
    idx = regex.findall(os.path.split(img_adr)[1])[0]
Example 5
def initial_pose_estimation(root_dir, train_eval_dir, classes,
                            intrinsic_matrix):

    for c in classes:
        class_pred_pose_fname = os.path.join(train_eval_dir, c,
                                             "predicted_pose")
        if not os.path.exists(class_pred_pose_fname):
            os.makedirs(class_pred_pose_fname)

    # LineMOD Dataset
    train_data = LineMODDataset(root_dir,
                                train_eval_dir,
                                classes=classes,
                                transform=transforms.Compose(
                                    [transforms.ToTensor()]))

    # load the best correspondence block weights
    correspondence_block = UNET.UNet(n_channels=3,
                                     out_channels_id=14,
                                     out_channels_uv=256,
                                     bilinear=True)
    correspondence_block.cuda()
    correspondence_block_filename = os.path.join(train_eval_dir,
                                                 'correspondence_block.pt')
    correspondence_block.load_state_dict(
        torch.load(correspondence_block_filename,
                   map_location=torch.device('cpu')))

    # initial 6D pose prediction
    regex = re.compile(r'\d+')
    outliers = 0
    for i in range(len(train_data)):
        if i % 100 == 0:
            print(str(i) + "/" + str(len(train_data)) + " finished!")
        img_adr, img, idmask, _, _ = train_data[i]
        label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]
        idx = regex.findall(os.path.split(img_adr)[1])[0]
        img = img.view(1, img.shape[0], img.shape[1], img.shape[2])
        idmask_pred, umask_pred, vmask_pred = correspondence_block(img.cuda())
        # convert the masks to 240,320 shape
        temp = torch.argmax(idmask_pred, dim=1).squeeze().cpu()
        upred = torch.argmax(umask_pred, dim=1).squeeze().cpu()
        vpred = torch.argmax(vmask_pred, dim=1).squeeze().cpu()
        coord_2d = (temp == classes[label]).nonzero(as_tuple=True)

        adr = os.path.join(
            train_eval_dir,
            label + "/predicted_pose/" + "info_" + str(idx) + ".txt")

        coord_2d = torch.cat(
            (coord_2d[0].view(coord_2d[0].shape[0], 1), coord_2d[1].view(
                coord_2d[1].shape[0], 1)), 1)
        uvalues = upred[coord_2d[:, 0], coord_2d[:, 1]]
        vvalues = vpred[coord_2d[:, 0], coord_2d[:, 1]]
        dct_keys = torch.cat((uvalues.view(-1, 1), vvalues.view(-1, 1)), 1)
        dct_keys = tuple(dct_keys.numpy())
        dct = load_obj(os.path.join(root_dir, label + "/UV-XYZ_mapping"))
        mapping_2d = []
        mapping_3d = []
        for count, (u, v) in enumerate(dct_keys):
            if (u, v) in dct:
                mapping_2d.append(np.array(coord_2d[count]))
                mapping_3d.append(dct[(u, v)])
        # Get the 6D pose from rotation and translation matrices
        # PnP RANSAC needs at least 4 unique 2D-3D correspondences to run
        if len(mapping_2d) >= 4 and len(mapping_3d) >= 4:
            _, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                np.array(mapping_3d, dtype=np.float32),
                np.array(mapping_2d, dtype=np.float32),
                intrinsic_matrix,
                distCoeffs=None,
                iterationsCount=150,
                reprojectionError=1.0,
                flags=cv2.SOLVEPNP_P3P)
            rot, _ = cv2.Rodrigues(rvecs, jacobian=None)
            rot[np.isnan(rot)] = 1
            tvecs[np.isnan(tvecs)] = 1
            tvecs = np.where(-100 < tvecs, tvecs, np.array([-100.]))
            tvecs = np.where(tvecs < 100, tvecs, np.array([100.]))
            rot_tra = np.append(rot, tvecs, axis=1)
            # save the predicted pose
            np.savetxt(adr, rot_tra)
        else:  # save a placeholder pose (rotation of ones, zero translation)
            outliers += 1
            rot_tra = np.ones((3, 4))
            rot_tra[:, 3] = 0
            np.savetxt(adr, rot_tra)
    print("Number of instances where PnP couldn't be used: ", outliers)
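
A hypothetical sketch of how the 'UV-XYZ_mapping' dictionary loaded above could be built, inverting the spherical UV parameterisation used in create_GT_masks (Example 8); quantising U and V to 256 integer bins to match the 256 UV output channels is an assumption:

import numpy as np

def build_uv_xyz_mapping(pt_cld_data):
    centre = np.mean(pt_cld_data, axis=0)
    diff = pt_cld_data - centre
    length = np.linalg.norm(diff, axis=1)
    unit = diff / length[:, None]
    U = 0.5 + np.arctan2(unit[:, 2], unit[:, 0]) / (2 * np.pi)
    V = 0.5 - np.arcsin(unit[:, 1]) / np.pi
    dct = {}
    for u, v, xyz in zip((U * 255).astype(int), (V * 255).astype(int),
                         pt_cld_data):
        dct[(u, v)] = xyz
    return dct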
Example 6
gamma = 1
num_actions = 4
num_states = shape[0] * shape[1]
human_policy = init_nonrandom_policy()  #init_random_policy(25, 4)
human_performance = np.mean(evaluate_policy(env, human_policy))
print("human performance: ", human_performance)
saved_model = True

if not saved_model:
    learning_rate = 0.005
    untrained_agent = QLearning(num_actions, gamma, learning_rate)
    trained_agent = optimal_agent(env, untrained_agent, num_episodes=10000)
    q_star = trained_agent.q_table
    save_obj(dict(q_star), "q_star")
else:
    q_star = load_obj("q_star")
J_star = np.max(q_star[startState])
baseline = human_performance

shared = []
nq_cost = []
min_cost = []
mod_shared = []

alphas = np.concatenate((np.arange(0, 1e-3, 1e-4), np.arange(1e-3, 1e-2, 1e-3),
                         np.arange(1e-2, 0.1, 1e-2), np.arange(0.1, 1.01,
                                                               0.1)))
for alpha in tqdm(alphas):
    min_performance = baseline + alpha * (J_star - baseline)
    #mincost
    """
Example 7
parser.add_argument("--split", default=0.15, help="train:test split ratio")
parser.add_argument("--randomseed",
                    default=69,
                    help="train:test split random seed")
args = parser.parse_args()

root_dir = args.root_dir
background_dir = args.bgd_dir
train_eval_dir = args.train_eval_dir

# Pickling this eliminates the need to walk the dir which takes a while, especially
# on Google Colab when using a mounted drive
if os.path.exists(os.path.join(root_dir, "all_images_adr.pkl")):
    print("all_images_adr.pkl found. Assuming GT exists")
    gt_exists = True
    list_all_images = load_obj(os.path.join(root_dir, "all_images_adr"))
else:
    gt_exists = False
    list_all_images = []
    for root, dirs, files in os.walk(root_dir):
        for file in files:
            if file.endswith(".jpg"):  # images that exist
                list_all_images.append(os.path.join(root, file))

if os.path.exists(os.path.join(train_eval_dir, "train_images_indices.pkl")):
    sys.exit("train_images_indices.pkl found. Nothing to do.")

num_images = len(list_all_images)
indices = list(range(num_images))
np.random.seed(args.randomseed)
np.random.shuffle(indices)
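
A minimal sketch of how the shuffled indices might then be split and pickled, given the --split ratio and the *_images_indices.pkl names checked above (the save helper and the ordering of the split are assumptions):

num_test = int(np.floor(float(args.split) * num_images))
test_idx, train_idx = indices[:num_test], indices[num_test:]
save_obj(train_idx, os.path.join(train_eval_dir, "train_images_indices"))
save_obj(test_idx, os.path.join(train_eval_dir, "test_images_indices"))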
Example 8
def create_GT_masks(root_dir, background_dir, intrinsic_matrix, classes):
    """
    Helper function to create the Ground Truth ID, U and V masks
        Args:
        root_dir (str): path to the root directory of the dataset
        background_dir (str): path to the directory of background images
        intrinsic_matrix (array): matrix containing camera intrinsics
        classes (dict): dictionary containing classes and their ids
        Saves the masks to their respective directories
    """
    list_all_images = load_obj(root_dir + "all_images_adr")
    training_images_idx = load_obj(root_dir + "train_images_indices")
    for i in range(len(training_images_idx)):
        img_adr = list_all_images[training_images_idx[i]]
        label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]
        regex = re.compile(r'\d+')
        idx = regex.findall(os.path.split(img_adr)[1])[0]

        if i % 1000 == 0:
            print(str(i) + "/" + str(len(training_images_idx)) + " finished!")

        image = cv2.imread(img_adr)
        ID_mask = np.zeros((image.shape[0], image.shape[1]))
        U_mask = np.zeros((image.shape[0], image.shape[1]))
        V_mask = np.zeros((image.shape[0], image.shape[1]))

        ID_mask_file = root_dir + label + \
            "/ground_truth/IDmasks/color" + str(idx) + ".png"
        U_mask_file = root_dir + label + \
            "/ground_truth/Umasks/color" + str(idx) + ".png"
        V_mask_file = root_dir + label + \
            "/ground_truth/Vmasks/color" + str(idx) + ".png"

        tra_adr = root_dir + label + "/data/tra" + str(idx) + ".tra"
        rot_adr = root_dir + label + "/data/rot" + str(idx) + ".rot"
        rigid_transformation = get_rot_tra(rot_adr, tra_adr)

        # Read Point Cloud Data
        ptcld_file = root_dir + label + "/object.xyz"
        pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))
        ones = np.ones((pt_cld_data.shape[0], 1))
        homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)

        # Perspective Projection to obtain 2D coordinates for masks
        homogenous_2D = intrinsic_matrix @ (
            rigid_transformation @ homogenous_coordinate.T)
        coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]
        coord_2D = ((np.floor(coord_2D)).T).astype(int)
        x_2d = np.clip(coord_2D[:, 0], 0, 639)
        y_2d = np.clip(coord_2D[:, 1], 0, 479)
        ID_mask[y_2d, x_2d] = classes[label]

        if i % 100 != 0:  # change the background for 99 of every 100 images
            background_img_adr = background_dir + \
                random.choice(os.listdir(background_dir))
            background_img = cv2.imread(background_img_adr)
            background_img = cv2.resize(background_img,
                                        (image.shape[1], image.shape[0]),
                                        interpolation=cv2.INTER_AREA)
            background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]
            # background_img = cv2.cvtColor(background_img, cv2.COLOR_BGR2RGB)
            background_adr = root_dir + label + \
                "/changed_background/color" + str(idx) + ".png"
            cv2.imwrite(background_adr, background_img)

        # Generate Ground Truth UV Maps
        centre = np.mean(pt_cld_data, axis=0)
        length = np.sqrt((centre[0] - pt_cld_data[:, 0])**2 +
                         (centre[1] - pt_cld_data[:, 1])**2 +
                         (centre[2] - pt_cld_data[:, 2])**2)
        unit_vector = [(pt_cld_data[:, 0] - centre[0]) / length,
                       (pt_cld_data[:, 1] - centre[1]) / length,
                       (pt_cld_data[:, 2] - centre[2]) / length]
        U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0]) / (2 * np.pi))
        V = 0.5 - (np.arcsin(unit_vector[1]) / np.pi)
        U_mask[y_2d, x_2d] = U
        V_mask[y_2d, x_2d] = V

        # Saving ID, U and V masks after using the fill holes function
        ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)
        cv2.imwrite(ID_mask_file, ID_mask)
        cv2.imwrite(U_mask_file, U_mask * 255)
        cv2.imwrite(V_mask_file, V_mask * 255)
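
fill_holes is not shown here; a plausible stand-in is a morphological closing on each mask so the sparsely projected points form solid regions (the kernel size is an assumption):

import cv2
import numpy as np

def fill_holes(id_mask, u_mask, v_mask, ksize=3):
    kernel = np.ones((ksize, ksize), np.uint8)
    close = lambda m: cv2.morphologyEx(m.astype(np.float32),
                                       cv2.MORPH_CLOSE, kernel)
    return close(id_mask), close(u_mask), close(v_mask)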
Example 9
#modelg= KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True,limit=40000)

url='https://raw.githubusercontent.com/Pratikmehta1729/walkover/master/eng_sites_dataset_2.0_shuffled.csv'


#get titles,l,title_map
df = pd.read_csv(url)
l=[]
titles=[]
y=df.title.tolist()
x=df.text.tolist()

title_map={}
count=0
for title in y:
  title_map[title]=count
  titles.append(title)
  count+=1

for item in x:
    l.append(item.split())

sentences = l
modelg = Word2Vec(sentences, min_count=1, size=200)
kmeans=load_obj("kmeans")
X=load_obj("X")
labels=load_obj("labels")
centroids=load_obj("centroids")
final_dict=load_obj("final_dict")
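
A hypothetical sketch of how the pickled feature matrix X might have been produced before clustering, e.g. by mean-pooling each text's word vectors from the Word2Vec model above (this construction is an assumption):

import numpy as np
X_sketch = np.array([
    np.mean([modelg.wv[w] for w in sent if w in modelg.wv] or [np.zeros(200)],
            axis=0)
    for sent in sentences
])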

Example 10
import pandas as pd

# saving graph/doc embeddings
path_to_data = 'fill me'

#####==================PCA===========================##############
gl_embds = np.load(path_to_data + 'graph_mv.npy')
k = 50
pca = PCA(n_components=k)
gl_pca = pca.fit_transform(gl_embds)
SAVE = True
if SAVE:
    np.save(path_to_data + 'gl_pca.npy', gl_pca)
del gl_embds
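
# Optional sanity check (an added illustration, not part of the original):
# how much variance the k=50 PCA projection retains.
print('explained variance kept:', pca.explained_variance_ratio_.sum())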

graphs = load_obj(path_to_data, 'graphs')
# Rearranging the embeddings into the initial order
SAVE = False
if SAVE:
    print('unstructured embeddings')
    ml_embds = np.load(path_to_data + 'ml_embds_mv.npy')
    print('original embeddings')
    embds = np.load(path_to_data + 'embeddings.npy')

    print('building embeddings matrix...\n')
    embeddings_mv = np.zeros((embds.shape[0], ml_embds.shape[2]))
    for i, graph in enumerate(graphs):
        for j, node in enumerate(graph.nodes()):
            embeddings_mv[node] = ml_embds[i, j, :]

        sys.stderr.write('\rGraph: %d/%d' % (i + 1, len(graphs)))
Example 11
def __init__(self, root_dir, classes=None, transform=None):
    self.root_dir = root_dir
    self.transform = transform
    self.classes = classes
    self.list_all_images = load_obj(root_dir + "all_images_adr")
    self.training_images_idx = load_obj(root_dir + "train_images_indices")
Example 12
from helper import load_obj

messages = load_obj('skype_messages')
print(messages.keys())
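
Since load_obj returns a plain dict here, a quick usage example (the structure of the values is an assumption):

for conversation, msgs in messages.items():
    print(conversation, len(msgs))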