Example #1
def find_reflector(self, contours):
    """Filter contours, keeping only those shaped like the reflector.

    Relies on cv2, math and the project's utils module.
    """
    correct_contours = []
    for con in contours:
        convex = cv2.convexHull(con)
        convex_area = cv2.contourArea(convex)
        box = utils.box(con)
        (x1, y1), (x2, y2), (x3, y3) = box[0], box[1], box[2]
        # Box area as the product of two adjacent side lengths.
        box_area = (math.hypot(x1 - x2, y1 - y2) *
                    math.hypot(x2 - x3, y2 - y3))
        # Keep contours whose box area closely matches their convex hull
        # and that have between 7 and 11 approximated points.
        if (0.9 < box_area / convex_area < 1.15
                and 6 < len(utils.points(con)) < 12):
            correct_contours.append(con)  # extend() was called with no argument
    return correct_contours  # return after the loop, not inside it
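A minimal usage sketch, assuming OpenCV 4.x; the binary mask and the pipeline instance are hypothetical:

import cv2

# mask: hypothetical binary image from a colour threshold
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
reflectors = pipeline.find_reflector(contours)  # pipeline: hypothetical instance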
Example #2
def text_generator(seed,
                   unconditional=False,
                   nsamples=1,
                   batch_size=-1,
                   length=-1,
                   temperature=0.7,
                   top_k=40):
    """Generate text from a seed with a GPT-2 model and return it as HTML.

    Relies on module-level globals: model, config, device, get_encoder,
    sample_sequence, add_content, header and box.
    """
    enc = get_encoder()
    context_tokens = enc.encode(seed)

    if batch_size == -1:
        batch_size = 1
    assert nsamples % batch_size == 0

    if length == -1:
        length = config.n_ctx // 2
    elif length > config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" %
                         config.n_ctx)

    out = sample_sequence(
        model=model,
        length=length,
        context=context_tokens if not unconditional else None,
        start_token=enc.encoder['<|endoftext|>'] if unconditional else None,
        batch_size=batch_size,
        temperature=temperature,
        top_k=top_k,
        device=device)

    text = ''

    # Drop the echoed seed tokens from the front of each sample.
    out = out[:, len(context_tokens):].tolist()
    for i in range(batch_size):
        text += enc.decode(out[i])

    html = ''
    html = add_content(
        html, header('Input Seed ', color='black', gen_text='Network Output'))
    html = add_content(html, box(seed, text))
    return f'<div>{html}</div>'
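A hedged usage sketch; the seed text and parameter values are arbitrary:

html_fragment = text_generator("Deep learning is", nsamples=2, batch_size=2,
                               length=80, temperature=0.8, top_k=40)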
Example #3
def generate_output(s, words_to_generate=50, diversity=0.75):
    """Generate output from a seed sequence.

    Relies on module-level globals: graph, model, format_sequence,
    remove_spaces, addContent, header and box.
    """
    # Mapping of words to integers, plus the inverse mapping
    with open('../data/word-index.json') as f:
        word_idx = json.load(f)
    idx_word = {idx: word for word, idx in word_idx.items()}

    # Original formatted text
    start = format_sequence(s).split()
    gen = []
    s = start[:]

    with graph.as_default():

        # Generate output
        for i in range(words_to_generate):
            # Convert to array
            x = np.array([word_idx.get(word, 0) for word in s]).reshape(
                (1, -1))

            # Make predictions
            preds = model.predict(x)[0].astype(float)

            # Diversify
            preds = np.log(preds) / diversity
            exp_preds = np.exp(preds)
            # Softmax
            preds = exp_preds / np.sum(exp_preds)

            # Pick next index
            next_idx = np.argmax(np.random.multinomial(1, preds, size=1))
            s.append(idx_word[next_idx])
            gen.append(idx_word[next_idx])

    # Formatting in html
    start = remove_spaces(' '.join(start)) + ' '
    gen = remove_spaces(' '.join(gen))
    html = ''
    html = addContent(
        html, header('Input Seed ', color='black', gen_text='Network Output'))
    html = addContent(html, box(start, gen))
    return html
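The "Diversify" block above is ordinary temperature sampling: log-probabilities are divided by the temperature before the softmax, so values below 1 sharpen the distribution. A self-contained sketch of the same computation (the epsilon guard against log(0) is an addition not in the original):

import numpy as np

def sample_with_diversity(preds, diversity=0.75):
    # rescale log-probabilities by the temperature
    preds = np.log(np.asarray(preds, dtype=float) + 1e-10) / diversity
    exp_preds = np.exp(preds)
    probs = exp_preds / np.sum(exp_preds)  # softmax back to a distribution
    # draw one sample and return its index
    return int(np.argmax(np.random.multinomial(1, probs)))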
Example #4
def measurements(original, contours):
    """Return the angle and distance to the nearest contour, or (None, None).

    Relies on math, utils and constants from the surrounding module.
    """
    if not contours:
        return None, None
    distances = []
    for cnt in contours:
        points = utils.box(cnt)
        if not points.any():
            return None, None

        avg_real_heights = sum(utils.power_cube.values()) / len(
            utils.power_cube)

        # Edge lengths of the box; index i - 1 wraps around to close the loop.
        heights = []
        for i, point in enumerate(points):
            x = point[0] - points[i - 1][0]
            y = point[1] - points[i - 1][1]
            height = math.hypot(x, y)
            heights.append(height)

        # A 5-point box means one edge was split; replace the longest
        # edge with two halves so the average stays comparable.
        if len(points) == 5:
            max_height = max(heights)
            half_height = max_height / 2
            heights.remove(max_height)
            heights.extend([half_height] * 2)

        avg_heights = sum(heights) / len(heights)

        # Pinhole estimate: distance = real_size * focal_length / pixel_size.
        distances.append(
            (avg_real_heights * constants.FOCAL_LENGTHS['lifecam']) /
            avg_heights)

    min_distance = min(distances)
    chosen_one = contours[distances.index(min_distance)]
    angle = utils.angle(constants.FOCAL_LENGTHS['lifecam'],
                        utils.center(chosen_one)[0], original)
    return angle, min_distance
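The distance estimate is the standard pinhole-camera relation, distance = real_height * focal_length / pixel_height. A quick worked check with made-up numbers (all values hypothetical):

real_height_m = 0.28      # physical height of the target, metres
focal_length_px = 700.0   # camera focal length, pixels
pixel_height = 56.0       # measured height in the image, pixels
distance = real_height_m * focal_length_px / pixel_height  # 3.5 metres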
Example #5
    hyperparameters_information = {}
    hyperparameters_information["learning_rate"] = learning_rate
    hyperparameters_information["learning_rate_decay"] = learning_rate_decay
    hyperparameters_information["steps_per_decay"] = steps_per_decay
    hyperparameters_information["regularization_rate"] = regularization_rate
    hyperparameters_information["max_epoch_num"] = max_epoch_num
    hyperparameters_information["batch_size"] = batch_size
    hyperparameters_information["auto_terminate"] = auto_terminate
    if auto_terminate:
        hyperparameters_information["observation_of_loss_num"] = observe_dif_times
        hyperparameters_information["terminate_threshold"] = terminate_threshold
    print("")
    utils.box_dict(hyperparameters_information, "hyperparameters_information")

    # start training
    utils.box("Training Process")

    index_select = np.arange(len(feature_vector))
    pbar = tqdm(range(max_epoch_num))
    
    for present_epoch in pbar:
        i = 0
        # shuffle data
        np.random.shuffle(index_select)
        feature_vector = feature_vector[index_select, :]
        training_labels = training_labels[index_select]
        
        while i < example_num:
            step += 1
            batch_input = feature_vector[i:i + batch_size]
            batch_labels = training_labels[i:i + batch_size]
            i += batch_size  # advance to the next batch (the loop body is truncated here)
Example #6
    num_feature_vector = len(special_feature_vector)
    num_training_feature_vector = int(num_feature_vector *
                                      (1 - validation_proportion))
    num_validating_feature_vector = (num_feature_vector -
                                     num_training_feature_vector)

    best_weight_record = []
    best_validation_accur_record = []

    index_select_all = np.arange(num_feature_vector)

    cross_validation_times = int(1 / validation_proportion)

    utils.box("Training Start")

    graph = plt.subplot(1, 1, 1)

    for times in range(cross_validation_times):
        softmax_model.global_initialize()
        softmax_model.set_learning_rate(learning_rate)
        # add a slot for storing weights and reset the best scores
        best_weight_record.append([])
        best_validation_accur_record.append([])
        best_step = 0
        best_accur = 0
        best_loss = 10
        # shuffle features and labels with the same permutation
        np.random.shuffle(index_select_all)
        special_feature_vector = special_feature_vector[index_select_all, :]
        special_labels = special_labels[index_select_all]
Example #7
    # store the full data set back before the final run
    special_feature_vector = feature_vector
    special_labels = training_labels

    num_feature_vector = len(special_feature_vector)
    num_training_feature_vector = int(num_feature_vector *
                                      (1 - validation_proportion))
    num_validating_feature_vector = (num_feature_vector -
                                     num_training_feature_vector)

    best_weight_record = []

    index_select_all = np.arange(num_feature_vector)

    utils.box("Training Process")

    softmax_model.global_initialize()
    softmax_model.set_learning_rate(learning_rate)
    # add a slot for storing weights and reset the best scores
    best_weight_record.append([])
    times = 0
    best_step = 0
    best_accur = 0
    best_loss = 20

    # shuffle op
    np.random.shuffle(index_select_all)
    special_feature_vector = special_feature_vector[index_select_all, :]
    special_labels = special_labels[index_select_all]
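The shuffle idiom used throughout these training excerpts permutes a shared index array so features and labels stay aligned; a self-contained sketch:

import numpy as np

features = np.arange(12).reshape(6, 2)
labels = np.arange(6)

idx = np.arange(len(features))
np.random.shuffle(idx)        # one random permutation
features = features[idx, :]   # apply it to the features...
labels = labels[idx]          # ...and the same one to the labels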
Example #8
def allocate():
    """Re-create the row of boxes inside the container.

    Relies on module-level globals: containers, n, win, space and box.
    """
    containers[1].clear()
    for i in range(n):
        containers[1].append(box(win, 10 * i, space))