Example #1
def layer():
    global NC_LAYER, NC_IMAGE  #, NC_SCORE

    print(utils.ribb("==", sep="="))
    print(utils.ribb("[%d] LAYER " % NC_LAYER, sep="="))
    print(utils.ribb("==", sep="="), "\n")

    # --- 1 step --- find all possible lines that make sense -------------------
    print(utils.ribb(utils.head("SLID"), utils.clock(), "--- 1 step "))
    segments = pSLID(NC_IMAGE['main'])
    raw_lines = SLID(NC_IMAGE['main'], segments)
    lines = slid_tendency(raw_lines)

    # --- 2 step --- find interesting intersections (potentially a mesh grid) --
    print(utils.ribb(utils.head("LAPS"), utils.clock(), "--- 2 step "))
    points = LAPS(NC_IMAGE['main'], lines)
    #print(abs(49 - len(points)), NC_SCORE)
    #if NC_SCORE != -1 and abs(49 - len(points)) > NC_SCORE * 4: return
    #NC_SCORE = abs(49 - len(points))

    # --- 3 step --- last layer reproduction (for chessboard corners) ----------
    print(utils.ribb(utils.head(" LLR"), utils.clock(), "--- 3 step "))
    inner_points = LLR(NC_IMAGE['main'], points, lines)
    four_points = llr_pad(inner_points, NC_IMAGE['main'])  # padcrop

    # --- 4 step --- preparation for next layer (deep analysis) ----------------
    print(utils.ribb(utils.head("   *"), utils.clock(), "--- 4 step "))
    print(four_points)
    try:
        NC_IMAGE.crop(four_points)
    except Exception:
        utils.warn("unfortunately, the next layer is not needed")
        NC_IMAGE.crop(inner_points)

    print("\n")
Example #2
def pattern(duration):
    '''
    Performs the shimmer pattern for the specified duration.

    @param duration - the duration this pattern should run for, in seconds
    '''
    duration = duration * 1000.0  # convert seconds to milliseconds
    start_time = util.clock()

    num_leds = round(const.ALL_LEDS / 10.0)
    time_between_writes = 125  # milliseconds between packets

    num_leds = min(util.max_lines_per_write(time_between_writes), num_leds)

    while (start_time + duration > util.clock()):
        write_time = util.clock()
        util.clear_leds()

        LEDs = []
        for led in range(0, num_leds):
            # randint is inclusive on both ends, so cap at ALL_LEDS - 1
            curr_led = rand.randint(0, const.ALL_LEDS - 1)
            if ((curr_led, const.WHITE) not in LEDs):
                LEDs.append((curr_led, const.WHITE))

        arduino.write_packet(LEDs)
        time_already_waited = util.clock() - write_time
        util.sleep(max(0, time_between_writes - time_already_waited))
def write_packet(LED_values):
    '''
    Writes a list of LED values to the Arduino. Packets sent before the next
    available write time are skipped rather than blocking, so the clock keeps
    running.

    @param[in]  LED_values - a list of tuples containing the colour value each
                LED should be written to
    '''
    global next_write_time  # throttle state lives at module level
    packet = encode_packet(LED_values)
    if (util.clock() >= next_write_time):
        port.write(packet.encode())
        next_write_time = util.time_to_next_write(len(LED_values)) + util.clock()
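The two `util` helpers used above are not shown. A minimal sketch of what they might look like, assuming a fixed serial baud rate and a fixed byte cost per LED; every constant and formula here is an illustrative assumption, not the real `util` module:

BAUD_RATE = 115200     # assumed serial speed, bits per second
BYTES_PER_LED = 4      # assumed wire size of one (led, colour) tuple

def time_to_next_write(num_leds):
    # Milliseconds the serial link needs to drain num_leds worth of data.
    bits = num_leds * BYTES_PER_LED * 8
    return bits / float(BAUD_RATE) * 1000.0

def max_lines_per_write(window_ms):
    # How many LED writes fit into a window_ms budget.
    bits_per_ms = BAUD_RATE / 1000.0
    return int(window_ms * bits_per_ms / (BYTES_PER_LED * 8))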
Example #4
def layer():
    lt = time.time()
    global NC_LAYER, NC_IMAGE  #, NC_SCORE

    #print(utils.ribb("==", sep="="))
    #print(utils.ribb("[%d] LAYER " % NC_LAYER, sep="="))
    #print(utils.ribb("==", sep="="), "\n")

    # --- 1 step --- find all possible lines that make sense -------------------
    print("Starting new round")
    lt = time.time()
    segments = pSLID(NC_IMAGE['main'])
    raw_lines = SLID(NC_IMAGE['main'], segments)
    lines = slid_tendency(raw_lines)

    # --- 2 step --- find interesting intersections (potentially a mesh grid) --
    print(utils.clock(),
          time.time() - lt, "--- 1 step --- found all lines", len(lines))
    v[0] += time.time() - lt  # v: global list of per-step timing accumulators (defined elsewhere)
    lt = time.time()
    points = LAPS(NC_IMAGE['main'], lines)

    print(utils.clock(),
          time.time() - lt, "--- 2 step --- find all intersections",
          len(points))
    v[1] += time.time() - lt
    lt = time.time()
    four_points, mat_pts = hldet.getGridFromPoints(
        points, padding=0 if NC_LAYER == 2 else .25)
    result = four_points
    oim = NC_IMAGE['main'].copy()
    for pt in mat_pts:  # visualise the fitted grid points on a copy of the image
        cv2.circle(oim, (int(pt[0]), int(pt[1])), 6, (255, 0, 0), 3)

    print(utils.clock(),
          time.time() - lt, "--- 3 step --- fit grid from points")
    v[2] += time.time() - lt
    lt = time.time()
    try:
        NC_IMAGE.crop(four_points)
    except Exception:
        utils.warn("Error on crop")

    print(utils.clock(), time.time() - lt, "--- 4 step --- post crop")
    return result
Example #5
def run_game():
    generation, width, height, board = read_file("input_data.txt")
    ok = board_validation(width, height, board)
    if not ok:
        logger.error("Bad board!")
        return False
    logger.info(board)

    for iteration in range(generation):
        board = clock(board)
    logger.info("End Game!")
    logger.info(board)
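Here `clock(board)` advances the Game of Life by one generation; its implementation is not part of the example. A minimal sketch, assuming `board` is a rectangular list of lists of 0/1 cells:

def clock(board):
    # Sketch only: one Game of Life step under the 0/1 list-of-lists assumption.
    height, width = len(board), len(board[0])

    def live_neighbours(r, c):
        return sum(board[r + dr][c + dc]
                   for dr in (-1, 0, 1) for dc in (-1, 0, 1)
                   if (dr or dc) and 0 <= r + dr < height and 0 <= c + dc < width)

    nxt = [[0] * width for _ in range(height)]
    for r in range(height):
        for c in range(width):
            n = live_neighbours(r, c)
            nxt[r][c] = 1 if n == 3 or (n == 2 and board[r][c]) else 0
    return nxt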
Example #6
# convert input image to floating point
image_f = np.float32(image_ghost) / 255.0

# move colour dimension outside
image_f_flip = np.rollaxis(image_f, 2).ravel()

# result array
res = np.empty((3, rows, cols), np.float32)

# start timer
print "-------------------------------"
startTime = time.clock()
avgTime = 0
for run in range(1, nruns+1):
    frameStart = clock()
    # Compute
    unsharp(ctypes.c_int(cols),
            ctypes.c_int(rows),
            ctypes.c_float(threshold),
            ctypes.c_float(weight),
            ctypes.c_void_p(image_f_flip.ctypes.data),
            ctypes.c_void_p(res.ctypes.data))
    frameEnd = clock()
    frameTime = float(frameEnd) - float(frameStart)

    if run != 1:  # skip the first run so warm-up cost is excluded from the average
        print(frameEnd*1000 - frameStart*1000, "ms")
        avgTime += frameTime

print "-------------------------------"
Example #7
    def train_sine(self,
                   epochs=100,
                   report_error_avg=10,
                   batch_size=100,
                   logdir='tf-log/fourier/',
                   min_gt_prob=0.,
                   init_gt_prob=1.0,
                   decrease_per_batch=0.0005):
        # input_batch, target_batch = val_data.next()
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        stats_writer = tf.summary.FileWriter(os.path.join(logdir, timestamp),
                                             graph=self.graph)
        use_ground_truth_prob = init_gt_prob  # defined up front so the report below never hits a NameError
        for e in range(epochs):
            running_error = 0.
            val_error = 0.
            mean_running_error = 0.
            with clock():
                for i in range(report_error_avg):
                    input_batch, target_batch = generate_x_y_data_v3(
                        True, batch_size)
                    input_batch = input_batch.reshape(input_batch.shape[0],
                                                      -1).T
                    target_batch = target_batch.reshape(
                        target_batch.shape[0], -1).T
                    #     print(input_batch.shape, target_batch.shape)
                    if self.use_scheduled_sampling:
                        batch_no = e * report_error_avg + i
                        use_ground_truth_prob = max(
                            min_gt_prob,
                            init_gt_prob - decrease_per_batch * batch_no)
                        feed_dict = {
                            self.teacher_force: True,
                            self.keep_prob: 1.0,
                            self.sampling_probability:
                            1 - use_ground_truth_prob
                        }
                    else:
                        feed_dict = {
                            self.teacher_force: np.random.random_sample() <
                            self.teacher_forcing_ratio,
                            self.keep_prob: 1.0
                        }
                    # each decoder input is batch size x 1
                    feed_dict = self.feed_vals(input_batch, target_batch,
                                               feed_dict)
                    _, err, summary = self.sess.run(
                        [self.train_op, self.loss, self.summary_op],
                        feed_dict=feed_dict)
                    mean_preds = np.mean(input_batch[:, :],
                                         axis=1).reshape(-1, 1)
                    mean_errs = np.abs(mean_preds - target_batch)
                    batch_mean_err = np.mean(mean_errs)
                    running_error += err
                    mean_running_error += batch_mean_err
                for i in range(report_error_avg):
                    input_batch, target_batch = generate_x_y_data_v3(
                        True, batch_size)
                    input_batch = input_batch.reshape(input_batch.shape[0],
                                                      -1).T
                    target_batch = target_batch.reshape(
                        target_batch.shape[0], -1).T
                    #     print(input_batch.shape, target_batch.shape)
                    feed_dict = {
                        self.teacher_force: False,
                        self.keep_prob: 1.0
                    }
                    # each decoder input is batch size x 1
                    feed_dict = self.feed_vals(input_batch, target_batch,
                                               feed_dict)
                    val_err, summary = self.sess.run(
                        [self.loss, self.summary_op], feed_dict=feed_dict)
                    val_error += val_err
                    # stats_writer.add_summary(summary, e*report_error_avg + i)
                running_error /= report_error_avg
                mean_running_error /= report_error_avg
                val_error /= report_error_avg

            print("""End of epoch {0}: running error average = {1:.3f}
                     mean error average = {2:.3f}
                     val error average = {3:.3f}
                     use ground truth prob = {4}""".format(
                e + 1, running_error, mean_running_error, val_error,
                use_ground_truth_prob))
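With the defaults above (`init_gt_prob=1.0`, `decrease_per_batch=0.0005`, `min_gt_prob=0.`), the probability of feeding the ground truth to the decoder decays linearly with the global batch number and bottoms out at zero after 2000 batches:

# The scheduled-sampling decay exactly as computed inside the loop above.
init_gt_prob, min_gt_prob, decrease_per_batch = 1.0, 0.0, 0.0005
for batch_no in (0, 500, 1000, 2000, 3000):
    p = max(min_gt_prob, init_gt_prob - decrease_per_batch * batch_no)
    print(batch_no, p)   # -> 1.0, 0.75, 0.5, 0.0, 0.0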
Example #8
    def train(self,
              train_data,
              valid_data,
              epochs=20,
              keep_prob=0.7,
              logdir='tf-log',
              save=False,
              close_session=False):
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        stats_writer = tf.summary.FileWriter(os.path.join(logdir, timestamp),
                                             graph=self.graph)
        model_dir = 'checkpoints/{}-{}-{}'.format(timestamp, self.n_cond,
                                                  self.n_pred)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        for e in range(epochs):
            running_error, val_error = 0., 0.
            median_running_error = 0.
            with clock():
                for input_batch, target_batch in train_data:
                    #     print(input_batch.shape, target_batch.shape)
                    feed_dict = {
                        self.teacher_force:
                        np.random.random_sample() < self.teacher_forcing_ratio,
                        self.keep_prob: keep_prob
                    }
                    # each decoder input is batch size x 1
                    feed_dict = self.feed_vals(input_batch, target_batch,
                                               feed_dict)
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict=feed_dict)
                    median_preds = np.median(input_batch[:, :],
                                             axis=1).reshape(-1, 1)
                    median_errs = np.abs(median_preds - target_batch)
                    batch_median_err = np.mean(median_errs)
                    running_error += err
                    median_running_error += batch_median_err
                running_error /= train_data.num_batches
                median_running_error /= train_data.num_batches
                if save:
                    # model_dir already contains the timestamp
                    self.saver.save(self.sess,
                                    model_dir + '/model.ckpt',
                                    global_step=e + 1)

            for input_batch, target_batch in valid_data:
                feed_dict = {
                    self.teacher_force: False,
                    self.keep_prob: 1.0,
                    self.supervise_train: True
                }
                feed_dict = self.feed_vals(input_batch, target_batch,
                                           feed_dict)
                val_err = self.sess.run(self.loss, feed_dict=feed_dict)
                # validation pass: compute the loss only, no training op
                val_error += val_err
            val_error /= valid_data.num_batches

            summary = tf.Summary(value=[
                tf.Summary.Value(tag="train_error",
                                 simple_value=running_error),
                tf.Summary.Value(tag="valid_error", simple_value=val_error),
            ])
            # http://stackoverflow.com/questions/37902705/how-to-manually-create-a-tf-summary
            stats_writer.add_summary(summary, e)

            print("""End of epoch {0}: running error average = {1:.3f}
                     median error average = {2:.3f}
                     val error average = {3:.3f}""".format(
                e + 1, running_error, median_running_error, val_error))

        if close_session:
            self.sess.close()
Example #9
imgalpha_ghost = \
    np.empty((4, rows+2, cols+2), np.float32)
imgalpha_ghost[0:4, 1:rows+1, 1:cols+1] = \
    imgalpha_region

# convert input image to floating point
imgalpha_f = np.float32(imgalpha_ghost) / 255.0

# result array
res = np.empty((3, rows, cols), np.float32)

# start timer
print "-------------------------------"
startTime = time.clock()
avgTime = 0
for run in range(1, nruns+1):
    frameStart = clock()
    # Compute
    interpolate(ctypes.c_int(cols), ctypes.c_int(rows),
                ctypes.c_void_p(imgalpha_f.ctypes.data),
                ctypes.c_void_p(res.ctypes.data))
    frameEnd = clock()
    frameTime = float(frameEnd) - float(frameStart)

    if run != 1:
        print(frameEnd*1000 - frameStart*1000, "ms")
        avgTime += frameTime

print "-------------------------------"
print "avg time: ", (avgTime/(nruns-1))*1000, "ms"
print "-------------------------------"
Example #10
    p = argparse.ArgumentParser(
        description='Find, crop and create FEN from image.')

    p.add_argument('mode', nargs=1, type=str,
                   help='detect | dataset | train | test')
    p.add_argument('--input', type=str,
                   help='input image (default: input.jpg)')
    p.add_argument('--output', type=str,
                   help='output path (default: output.jpg)')

    #os.system("rm test/steps/*.jpg") # FIXME: to jest bardzo grozne
    os.system("rm -rf test/steps; mkdir test/steps")

    args = p.parse_args()
    mode = str(args.mode[0])
    modes = {
        'detect': detect,
        'dataset': dataset,
        'train': train,
        'test': test
    }

    if mode not in modes:
        utils.errn("hey, there is no such procedure!!! (selected: %s)" % mode)

    modes[mode](args)
    print(utils.clock(), "done")
    K.clear_session()
    gc.collect()  # FIX: tensorflow#3388
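The shell call that resets `test/steps` (flagged as dangerous in the original comment) can be done with the standard library instead, avoiding `rm -rf` through a shell entirely. A sketch using only stdlib calls:

import os
import shutil

# Recreate test/steps without invoking a shell.
shutil.rmtree("test/steps", ignore_errors=True)
os.makedirs("test/steps", exist_ok=True)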