Example #1
    def exit_driver(self):
        driv_pos = self.driver.worldPosition
        driv_dim = utils.get_dimensions(self.driver)
        sens_pos = self.door_sensor.worldPosition
        sens_dim = utils.get_dimensions(self.door_sensor)
        sens_axx = self.door_sensor.getAxisVect((1, 0, 0))
        worl_axz = Vector((0, 0, 1))

        for direction in (-1, 1):
            driv_edg = sens_pos + sens_axx * direction * (sens_dim.x +
                                                          driv_dim.x) * 0.5

            #utils.draw_line(driv_pos, driv_edg)

            hit_obj, _, _ = self.rayCast(driv_edg, driv_pos)
            if hit_obj is None:
                driv_cen = sens_pos + sens_axx * direction * sens_dim.x * 0.5
                driv_off_pos = worl_axz * driv_dim.z * 0.5
                driv_off_neg = worl_axz * (driv_dim.z * 0.5 + RAY_OFFSET)
                driv_top = driv_cen + driv_off_pos
                driv_bot = driv_cen - driv_off_neg

                #utils.draw_line(driv_top, driv_bot)

                _, hit_pos, _ = self.rayCast(driv_bot, driv_top)
                if hit_pos is not None:
                    self.driver.worldPosition = hit_pos + driv_off_pos
                    self.remove_driver()
                    return True

        return False
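
The snippets in this listing all rely on a get_dimensions helper that is not shown. For the Blender Game Engine examples, a minimal sketch (the vertex-scan approach, the local-space contract, and ignoring object scale are all assumptions) could compute an axis-aligned bounding box over the object's mesh:

from mathutils import Vector

def get_dimensions(obj):
    # Hypothetical helper: scan the first mesh's vertices for the
    # local-space axis-aligned bounding box and return its extents.
    mesh = obj.meshes[0]
    lo = [float('inf')] * 3
    hi = [float('-inf')] * 3
    for mat_index in range(len(mesh.materials)):
        for v_index in range(mesh.getVertexArrayLength(mat_index)):
            xyz = mesh.getVertex(mat_index, v_index).XYZ
            for axis in range(3):
                lo[axis] = min(lo[axis], xyz[axis])
                hi[axis] = max(hi[axis], xyz[axis])
    return Vector((hi[0] - lo[0], hi[1] - lo[1], hi[2] - lo[2]))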
Example #2
def face_array(df_initial):
    faces = []
    pixels = df_initial['pixels'].tolist()
    width, height = get_dimensions(df_initial)

    for sequence in pixels:
        face = [int(pixel) for pixel in sequence.split()]
        face = np.asarray(face).reshape(width, height)
        faces.append(face.astype(np.uint8))
    faces = np.asarray(faces)
    return faces
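
Here get_dimensions(df_initial) must recover the face image size from the dataframe. A minimal sketch, assuming square fer2013-style faces whose side length is the square root of the per-row pixel count (the real helper is not shown):

import numpy as np

def get_dimensions(df):
    # Hypothetical helper: fer2013 faces are square, so the side length
    # is the square root of the pixel count of any row (48 for 48x48).
    n_pixels = len(df['pixels'].iloc[0].split())
    side = int(np.sqrt(n_pixels))
    return side, side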
Example #3
def load_embedding(path,
                   format="text",
                   vocabulary=None,
                   length_normalize=True,
                   normalize_dimensionwise=False,
                   to_unicode=True,
                   lower=False,
                   path2='',
                   delete_duplicates=False,
                   method_vgg="delete"):
    assert format in ["text", "bin", "senna", "vgg",
                      "DT_embeddings"], "Unrecognized format"

    if vocabulary is not None:
        if len(set(vocabulary)) != len(vocabulary):
            logging.warning(
                "Provided vocabulary has duplicates. IMPORTANT NOTE: The embedding that this function will return will not have duplicates."
            )

    if format == "text":
        dims_restriction = get_dimensions(path)
        vocab, matrix = from_TXT(path, vocabulary, dims_restriction)

    if format == "bin":
        vocab, matrix = from_BIN(path, vocabulary)

    if format == "senna":
        vocab, matrix = from_SENNA(path, path2)

    if format == "vgg":
        vocab, matrix = from_vgg(path, method_vgg)

    if format == "DT_embeddings":
        vocab, matrix = from_DT_embeddings(path, path2)

    if delete_duplicates:
        remove_duplicates(vocab, matrix)

    vocabulary = Vocabulary(vocab, to_unicode, lower)
    e = Embedding(vocabulary=vocabulary, vectors=matrix)

    if normalize_dimensionwise:
        e.length_normalize_dimensionwise()

    if length_normalize:
        e.length_normalize()

    return e
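
For the "text" format, get_dimensions(path) presumably reads the dimensionality from the word2vec text header, whose first line is "<vocab_size> <dims>". A sketch under that assumption:

def get_dimensions(path):
    # Hypothetical helper: parse the word2vec text header
    # "<vocab_size> <dims>" and return the dimensionality.
    with open(path, encoding='utf-8', errors='surrogateescape') as f:
        return int(f.readline().split()[1])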
Example #4
    def add_constraint(self):

        if self.constraint is not None:
            return

        # create and store vehicle constraint

        constraint = constraints.createConstraint(
            self.getPhysicsId(), 0, constraints.VEHICLE_CONSTRAINT)
        self.constraint = constraints.getVehicleConstraint(
            constraint.getConstraintId())

        # move wheels to vehicle constraint and set values (and remove collision objects)

        for i, wheel in enumerate(self.wheels):
            wheel.removeParent()

            susp_rest_len = self.WHEELS_SUSP_REST_LEN[i]
            attach_pos = self.wheels[wheel].xyz
            attach_pos.z += susp_rest_len
            down_dir = WHEELS_DOWN_DIR
            axle_dir = WHEELS_AXLE_DIR
            radius = (utils.get_dimensions(wheel).z * wheel.localScale.z) * 0.5
            has_steering = WHEELS_HAS_STEERING[i]

            self.constraint.addWheel(wheel, attach_pos, down_dir, axle_dir,
                                     susp_rest_len, radius, has_steering)

            self.constraint.setTyreFriction(self.FRICTION_VAL, i)
            self.constraint.setSuspensionDamping(self.DAMPING_VAL, i)
            self.constraint.setSuspensionCompression(self.COMPRESSION_VAL, i)
            self.constraint.setSuspensionStiffness(self.STIFFNESS_VAL, i)
            self.constraint.setRollInfluence(self.ROLL_VAL, i)

            if self.wheel_col_name in wheel.children:
                wheel.children[self.wheel_col_name].endObject()

        # apply steering value

        self.constraint.setSteeringValue(self.steering_val, 0)
        self.constraint.setSteeringValue(self.steering_val, 1)
Example #5
def create_emoji_filters(input_file, emoji_amount, emoji_size):
    comp_name = '0:v'
    seconds = get_total_seconds(input_file)
    width, height = get_dimensions(input_file)

    emoji_filters = []
    emoji_amt = emoji_amount * make_random_value([0.75, 0.95])
    smallest_dimension = min(width, height)
    random_emojis = get_random_emojis(int(seconds * emoji_amt),
                                      (smallest_dimension * emoji_size))
    emoji_keys = random_emojis.keys()

    for idx, emoji in enumerate(emoji_keys):
        size = random_emojis[emoji]['size']
        filter_str = random_emojis[emoji]['filter_str']

        max_x = width - size
        max_y = height - size
        pos_x = make_random_value([0, max_x])
        pos_y = make_random_value([0, max_y])

        dur = make_random_value([2, 5])
        max_start = seconds - dur
        start = make_random_value([0, max_start])
        end = start + dur

        new_comp = 'comp_{}'.format(idx)
        overlay = "overlay={}:{}:enable='between(t, {}, {})'".format(
            pos_x, pos_y, start, end)
        emoji_filter = ';'.join(
            [filter_str, '[{}][{}]{}'.format(comp_name, emoji, overlay)])

        if idx < (len(emoji_keys) - 1):
            emoji_filter += '[{}];'.format(new_comp)
            comp_name = new_comp

        emoji_filters.append(emoji_filter)

    return emoji_filters
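
In this example get_dimensions(input_file) must return the video's width and height. One plausible sketch shells out to ffprobe (assuming ffprobe is on PATH; the original helper is not shown):

import json
import subprocess

def get_dimensions(input_file):
    # Hypothetical helper: ask ffprobe for the first video stream's
    # width and height and return them as integers.
    out = subprocess.check_output([
        'ffprobe', '-v', 'error', '-select_streams', 'v:0',
        '-show_entries', 'stream=width,height', '-of', 'json', input_file,
    ])
    stream = json.loads(out)['streams'][0]
    return stream['width'], stream['height']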
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--embedding", type=str, required=True)
    parser.add_argument("-c", "--emb_4_generation", type=str, required=True)
    parser.add_argument("-d", "--dataset", type=str, required=True)
    parser.add_argument("-b", "--batch_size", type=int, default=1024)
    parser.add_argument("-k", "--num_nearest_neighbor", type=int, default=10)

    args = parser.parse_args()

    dims = get_dimensions(args.embedding)

    if dims != get_dimensions(args.emb_4_generation):
        raise ValueError(
            "All the embeddings must have the same number of dimensions and the embeddings must be in the word2vec format"
        )

    printTrace("Reading vocab...")

    vocab_emb = vocab_from_path(args.embedding)
    vocab_cross = vocab_from_path(args.emb_4_generation)
    dataset = get_dataset(args.dataset)
    vocab_to_generate = list(
        set(np.append((dataset.X[:, 0]), (dataset.X[:, 1]))))
    vocab_to_generate_set = set(vocab_to_generate)
    vocab_emb_delete = [x for x in vocab_emb if x not in vocab_to_generate_set]

    total_vocab = set.union(set(vocab_emb_delete), set(vocab_cross))
    interset_vocab = list(
        set.intersection(set(vocab_emb_delete), set(vocab_cross)))

    print("Final embedding will have " + str(len(total_vocab)) + " words")
    print("We will generate " + str(len(vocab_to_generate)) + " words")

    emb = load_embedding(
        args.emb_4_generation,
        vocabulary=None,
        lower=False,
        length_normalize=True,
        normalize_dimensionwise=False,
        delete_duplicates=True,
    )

    m = emb.words_to_matrix(vocab_to_generate)
    M = emb.words_to_matrix(interset_vocab)

    nn = []

    for i_batch, mb in enumerate(batch(m, args.batch_size)):

        string = ("<" + str(datetime.datetime.now()) + ">  " +
                  "Using Embedding " + str(args.emb_4_generation) +
                  " to generate vocab for Embedding " + str(args.embedding) +
                  ":  " + str(int(100 *
                                  (i_batch * args.batch_size) / len(m))) + "%")
        print(string, end="\r")

        # print(np.asarray(mb).shape)
        # print(np.asarray(M).shape)

        result = cosine_knn(mb, M, args.num_nearest_neighbor)

        for indexes in result:
            nn.append([interset_vocab[i] for i in indexes])

    del emb

    printTrace("===> Generating new_vocab <===")

    emb = load_embedding(
        args.embedding,
        vocabulary=vocab_emb_delete,
        lower=False,
        length_normalize=False,
        normalize_dimensionwise=False,
        delete_duplicates=True,
    )

    new_vectors = []
    for i_word, word in enumerate(vocab_to_generate):
        if i_word % 1000 == 0:
            string = ("<" + str(datetime.datetime.now()) + ">  " +
                      "Generating vocab " + ": " +
                      str(int(100 * i_word / len(vocab_to_generate))) + "%")
            print(string, end="\r")

        try:
            lw = nn[i_word]
            v = np.zeros([dims], dtype=float)
            for word_nn in lw:
                v += emb.word_to_vector(word_nn)

        except KeyError as exc:
            raise ValueError(
                "Something went wrong in the word generation process") from exc

        new_vectors.append(v / args.num_nearest_neighbor)

    print()

    del emb

    printTrace("===> Loading embeddings to compare <===")
    emb_generated = Embedding(vocabulary=Vocabulary(vocab_to_generate),
                              vectors=new_vectors)
    emb_original = load_embedding(
        args.embedding,
        vocabulary=vocab_to_generate,
        lower=False,
        length_normalize=False,
        normalize_dimensionwise=False,
        delete_duplicates=True,
    )

    printTrace("===> Evaluate <===")

    print("Original Embedding: ", end="")
    print(
        similarity_emd(
            emb_original,
            dataset.X,
            dataset.y,
            backoff_vector=None,
            lower=False,
            lang1prefix=None,
            lang2prefix=None,
        ))
    print("Generated Embedding: ", end="")
    print(
        similarity_emd(
            emb_generated,
            dataset.X,
            dataset.y,
            backoff_vector=None,
            lower=False,
            lang1prefix=None,
            lang2prefix=None,
        ))
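
cosine_knn is not defined anywhere in this listing. Because the embedding was loaded with length_normalize=True, cosine similarity reduces to a dot product, so a sketch consistent with the call site (the name and contract are inferred) could be:

import numpy as np

def cosine_knn(mb, M, k):
    # Hypothetical helper: rows of mb and M are unit length, so the dot
    # product equals cosine similarity; return, for each row of mb, the
    # indices of the k most similar rows of M.
    sims = np.asarray(mb) @ np.asarray(M).T
    top_k = np.argpartition(-sims, k - 1, axis=1)[:, :k]
    # argpartition leaves the k winners unordered; sort them by similarity.
    rows = np.arange(sims.shape[0])[:, None]
    return top_k[rows, np.argsort(-sims[rows, top_k], axis=1)]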
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--embeddings", nargs="+", required=True)
    parser.add_argument("-t", "--rotate_to", required=True)
    parser.add_argument("-o", "--output", required=True)
    parser.add_argument("-v", "--vocabulary", default=None)
    parser.add_argument("-b", "--batch_size", type=int, default=256)
    parser.add_argument("-k", "--num_nearest_neighbor", type=int, default=10)
    parser.add_argument("-r", "--retrofitting", default=None)
    parser.add_argument("-rn", "--retrofitting_n_iters", type=int, default=10)
    # parser.add_argument('-n', '--do_not_normalize_embs', default=False)
    parser.add_argument("-ir", "--do_not_retrofit_rotate_to", default=False)
    parser.add_argument("-nc", "--do_not_clean_files", default=False)
    parser.add_argument("-oov", "--generate_oov_words", action="store_false")

    args = parser.parse_args()

    is_rot_in_input = None

    for emb_i, emb in enumerate(args.embeddings):
        if emb == args.rotate_to:
            is_rot_in_input = emb_i

    if not os.path.exists("tmp"):
        os.makedirs("tmp")

    print(
        "tmp folder created, it will be deleted at the end of the execution (unless you have run the program with the -nc True option)"
    )

    if args.retrofitting is not None:
        printTrace("==> Retrofitting <==")
        for emb_i, emb in enumerate(args.embeddings):
            total = (len(args.embeddings)
                     if is_rot_in_input is not None
                     or args.do_not_retrofit_rotate_to
                     else len(args.embeddings) + 1)
            string = str(emb_i + 1) + " of " + str(total)
            print(string)
            excec_com = ("python3 Retrofitting/retrofit.py -i " + str(emb) +
                         " -l " + str(args.retrofitting) + " -n " +
                         str(args.retrofitting_n_iters) + " -o " + "tmp/" +
                         str(emb_i) + ".retro -d " + str(get_dimensions(emb)))
            print(excec_com)
            os.system(excec_com)

        if is_rot_in_input is None and not args.do_not_retrofit_rotate_to:
            string = (str(len(args.embeddings) + 1) + " of " +
                      str(len(args.embeddings) + 1))
            print(string)
            excec_com = ("python3 Retrofitting/retrofit.py -i " +
                         str(args.rotate_to) + " -l " +
                         str(args.retrofitting) + " -n " +
                         str(args.retrofitting_n_iters) + " -o " + "tmp/" +
                         "out.retro -d " + str(get_dimensions(args.rotate_to)))
            print(excec_com)
            os.system(excec_com)

        print()

    printTrace("==> Generating dictionaries for the mapping <==")

    for emb_i, emb in enumerate(args.embeddings):
        string = str(emb_i + 1) + " of " + str(len(args.embeddings))
        print(string)
        print_dictionary_for_vecmap(
            "tmp/" + str(emb_i) + ".dict",
            generate_dictionary_for_vecmap(path1=emb, path2=args.rotate_to),
        )

    print()

    printTrace("==> Normalizing Embeddings <==")

    for emb_i, emb in enumerate(args.embeddings):
        string = (str(emb_i + 1) + " of " + str(
            len(args.embeddings) if is_rot_in_input is not None else str(
                len(args.embeddings) + 1)))
        print(string)
        excec_com = ("python3 VecMap/normalize_embeddings.py unit center -i " +
                     (emb if args.retrofitting is None else "tmp/" +
                      str(emb_i) + ".retro") + " -o tmp/" + str(emb_i) +
                     ".norm")
        print(excec_com)
        os.system(excec_com)

    if is_rot_in_input is None:
        string = str(len(args.embeddings) +
                     1) + " of " + str(len(args.embeddings) + 1)
        print(string)
        excec_com = ("python3 VecMap/normalize_embeddings.py unit center -i " +
                     (args.rotate_to if args.retrofitting is None
                      or args.do_not_retrofit_rotate_to else "tmp/out.retro") +
                     " -o tmp/out.norm")
        print(excec_com)
        os.system(excec_com)

    print()

    printTrace("==> Mapping Embeddings <==")

    for emb_i, emb in enumerate(args.embeddings):

        if is_rot_in_input != emb_i:

            string = (str(emb_i + 1) + " of " +
                      str(len(args.embeddings) - 1) if is_rot_in_input
                      is not None else str(len(args.embeddings) + 1))
            print(string)

            source_input = "tmp/" + str(emb_i) + ".norm"
            target_input = ("tmp/out.norm" if is_rot_in_input is None else
                            "tmp/" + str(is_rot_in_input) + ".norm")
            source_output = "tmp/" + str(emb_i) + ".vecmap"
            target_output = "tmp/out.vecmap"
            dictionary = "tmp/" + str(emb_i) + ".dict"

            excec_com = ("python3 VecMap/map_embeddings.py --orthogonal " +
                         source_input + " " + target_input + " " +
                         source_output + " " + target_output + " -d " +
                         dictionary)
            print(excec_com)
            os.system(excec_com)

    print()

    printTrace("==> Generating Meta Embedding <==")

    embs = ""
    for emb_i, emb in enumerate(args.embeddings):
        if is_rot_in_input != emb_i:
            embs = embs + "tmp/" + str(emb_i) + ".vecmap "

    if is_rot_in_input is not None:
        embs = embs + "tmp/out.vecmap "

    excec_com = ("python3 embeddings_mean.py -i " + embs + "-o " +
                 args.output + " -b " + str(args.batch_size) + " -k " +
                 str(args.num_nearest_neighbor))

    if not args.generate_oov_words:
        excec_com = excec_com + " -oov"
    if args.vocabulary is not None:
        excec_com = excec_com + " -v " + args.vocabulary
    print(excec_com)
    os.system(excec_com)

    print()
    print("Done! Meta embedding generated in " + args.output)

    if not args.do_not_clean_files:
        print("Cleaning files...")
        try:
            os.system("rm -rf tmp")
        except Exception:
            print("Could not delete the tmp folder, do it manually")
    0: "Angry",
    1: "Disgust",
    2: "Fear",
    3: "Happy",
    4: "Sad",
    5: "Surprise",
    6: "Neutral"
}

root = os.path.join('./')

base_dir = os.path.join(root, 'fer2013.csv')

df_train, df_test, df_val, df_initial = get_data(emotion_type_dict, base_dir)

width, height = get_dimensions(df_initial)

if opt.samples:
    faces = face_array(df_initial)
    samples(faces, df_initial, emotion_type_dict)

if opt.view_data_counts:
    compare(df_initial)

if opt.train:
    # resnet,vgg,densenet,inception
    model_name = opt.model
    num_classes = 7
    feature_extract = False
    # Initialize the model for this run
    model, input_size = initialize_model(model_name, num_classes,
                                         feature_extract)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--embedding', required=True)
    parser.add_argument('-c', '--cross_embedding', required=True)
    parser.add_argument('-o', '--output', required=True)
    parser.add_argument('-b', '--batch_size', type=int, default=1024)
    parser.add_argument('-k', '--num_nearest_neighbor', type=int, default=10)

    args = parser.parse_args()

    dims = get_dimensions(args.embedding)

    if dims != get_dimensions(args.cross_embedding):
        raise ValueError('All the embeddings must have the same number of dimensions and the embeddings must be in the word2vec format')

    printTrace('Reading vocab...')

    vocab_emb = vocab_from_path(args.embedding)
    vocab_cross = vocab_from_path(args.cross_embedding)

    total_vocab = set.union(set(vocab_emb), set(vocab_cross))
    interset_vocab = list(set.intersection(set(vocab_emb), set(vocab_cross)))
    vocab_to_generate = set(vocab_cross) - set(vocab_emb)

    print('Final embedding will have ' + str(len(total_vocab)) + ' words')
    print('We will generate ' + str(len(vocab_to_generate)) + ' words')

    emb = load_embedding(args.cross_embedding, vocabulary=None, lower=False, length_normalize=True, normalize_dimensionwise=False,
                         delete_duplicates=True)

    m = emb.words_to_matrix(vocab_to_generate)

    M = emb.words_to_matrix(interset_vocab)

    nn = []

    for i_batch, mb in enumerate(batch(m, args.batch_size)):

        string = "<" + str(datetime.datetime.now()) + ">  " + 'Using Embedding ' + str(
            args.cross_embedding) + ' to generate vocab for Embedding ' + str(args.embedding) + ':  ' + str(
            int(100 * (i_batch * args.batch_size) / len(m))) + '%'
        print(string, end="\r")

        # print(np.asarray(mb).shape)
        # print(np.asarray(M).shape)

        result = cosine_knn(mb, M, args.num_nearest_neighbor)

        for indexes in result:
            nn.append([interset_vocab[i] for i in indexes])

    del emb

    printTrace('===> Generating new_vocab <===')

    emb = load_embedding(args.embedding, vocabulary=None, lower=False,
                         length_normalize=False, normalize_dimensionwise=False,
                         delete_duplicates=True)

    new_vectors = []
    for i_word, word in enumerate(vocab_to_generate):
        if i_word % 1000 == 0:
            string = "<" + str(datetime.datetime.now()) + ">  " + 'Generating vocab ' + args.output + ': ' + str(
                int(100 * i_word / len(vocab_to_generate))) + '%'
            print(string, end="\r")

        try:
            lw = nn[i_word]
            v = np.zeros([dims], dtype=float)
            for word_nn in lw:
                v += emb.word_to_vector(word_nn)

        except KeyError as exc:
            raise ValueError('Something went wrong in the word generation process') from exc

        new_vectors.append(v / args.num_nearest_neighbor)

    print()

    printTrace('===> Printing to file <===')

    with open(args.output, 'w') as file:

        print(str(len(emb.words) + len(vocab_to_generate)) + ' ' + str(dims), file=file)

        for w in emb.words:
            print(w + ' ' + ' '.join(['%.6g' % x for x in emb.word_to_vector(w)]), file=file)

        for w_i, w in enumerate(vocab_to_generate):
            print(w + ' ' + ' '.join(['%.6g' % x for x in new_vectors[w_i]]), file=file)
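
The batch helper used by this script (and by Example #6) is also absent from the listing. A minimal sketch matching enumerate(batch(m, args.batch_size)), assuming m supports len() and slicing:

def batch(iterable, n):
    # Hypothetical helper: yield successive length-n chunks.
    for start in range(0, len(iterable), n):
        yield iterable[start:start + n]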
Example #10
comm = MPI.COMM_WORLD
master = 0
top, right = 1.0, 1.0
rows, cols = int(sys.argv[1]), int(sys.argv[2])
min_diff = float('inf')
epsilon = 10.0 ** (-7)

if comm.rank == master:
    t = Timer('TOTAL')
    t_init = Timer('initialization')
    print(formatting(comm.rank, 'Calculate differential equation with {0} processes'.format(comm.size)))

rows = comm.bcast(rows, root=master)
cols = comm.bcast(cols, root=master)

comm_rows, comm_cols = get_dimensions(rows, comm.size)

if comm.rank == master:
    print(formatting(comm.rank, 'with {0}D topology: {1}x{2}'.format(
        2 if comm_cols > 1 else 1, comm_rows, comm_cols)))

cart_comm = comm.Create_cart(dims=[comm_rows, comm_cols], periods=[0, 0],
                             reorder=True)
coords = cart_comm.Get_coords(comm.rank)
comm_i, comm_j = coords

step_x = right / (cols - 1) if cols > 1 else float('inf')
step_y = -top / (rows - 1) if rows > 1 else float('inf')

first_cols = cols // comm_cols + cols % comm_cols
next_cols = cols // comm_cols
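
Unlike the earlier examples, get_dimensions(rows, comm.size) here factors the process count into a 2D process grid. A sketch that delegates to mpi4py's MPI.Compute_dims (the original balancing rule is an assumption; rows is accepted only to match the call site):

from mpi4py import MPI

def get_dimensions(rows, size):
    # Hypothetical helper: split `size` processes into a balanced
    # comm_rows x comm_cols grid.
    comm_rows, comm_cols = MPI.Compute_dims(size, 2)
    return comm_rows, comm_cols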