def main():
    """
    For egreedy in [0, 0.01, 0.1]:

    - Create a Bandit with 10 possible actions and play it 1000 times.
    - Repeat the whole thing 2000 times and average the results.
    """
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering=1)  # line-buffered stdout so progress prints appear immediately

    # Let's record the execution times
    start = time.time()

    # greedy game
    game()

    # egreedy games
    game(egreedy=0.1)
    game(egreedy=0.01)

    end = time.time()
    total_time = time.gmtime(end - start)
    print "The total computation took: %dh:%d:%d\n" % (total_time.tm_hour,
        total_time.tm_min, total_time.tm_sec)

    # Draw graphs
    draw(REWARD_VALUES)
    draw(OPTIMAL_ACTIONS_VALUES)
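
The game() calls above rely on an ε-greedy Bandit that is not shown. A minimal sketch of the action-selection rule they imply (choose_action and q_estimates are illustrative names, not from the original code):

import numpy as np

def choose_action(q_estimates, egreedy=0.0):
    # Hypothetical sketch: explore with probability `egreedy`, exploit otherwise.
    if np.random.random() < egreedy:
        return np.random.randint(len(q_estimates))  # random arm
    return int(np.argmax(q_estimates))  # greedy arm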
Example #2
def monitor_samples():
    itr = iter(sampling_loader)
    for i in range(8):
        data = next(itr)
        chars, chars_mask, strokes, strokes_mask = [x.cuda() for x in data]

        with torch.no_grad():
            stroke_loss, eos_loss, monitor_vars, _, teacher_forced_sample = model.compute_loss(
                chars, chars_mask, strokes, strokes_mask)
            generated_sample = model.sample(chars, chars_mask)[0]

        teacher_forced_sample = teacher_forced_sample.cpu().numpy()
        generated_sample = generated_sample.cpu().numpy()

        # Plotting image for phi
        phi = monitor_vars.pop('phi')
        fig = plot_image(phi[0].squeeze().cpu().numpy().T)
        writer.add_figure('attention/phi_%d' % i, fig, steps)

        # Line plot for alpha, beta and kappa
        for key, val in monitor_vars.items():
            fig = plot_lines(val[0].cpu().numpy().T)
            writer.add_figure('attention/%s_%d' % (key, i), fig, steps)

        # Draw generated and teacher forced samples
        fig = draw(generated_sample[0],
                   save_file=root / ("generated_%d.png" % i))
        writer.add_figure("samples/generated_%d" % i, fig, steps)

        fig = draw(teacher_forced_sample[0],
                   save_file=root / ("teacher_forced_%d.png" % i))
        writer.add_figure("samples/teacher_forced_%d" % i, fig, steps)
Example #3
    def r(self):
        """transform out of tensor to numpy
            filter with confidence
            calculate coordinates
            filter with NMS
            crop image from original image for ONet's input
            draw"""
        start_time = time.time()
        data, prior = self.p()
        with torch.no_grad():
            confi, offset = self.rnet(data.cuda())
        confi = confi.cpu().numpy().flatten()
        offset = offset.cpu().numpy()

        keep = confi >= 0.99
        offset, prior, confi = offset[keep], prior[keep], confi[keep]

        offset, landmarks = offset[:, :4], offset[:, 4:]
        offset, landmarks = utils.transform(offset, landmarks, prior)

        boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks))
        boxes = utils.NMS(boxes, threshold=0.6, ismin=False)

        o_data, o_prior = utils.crop_to_square(boxes[:, :5], 48, self.image)

        o_prior = np.stack(o_prior, axis=0)
        o_data = torch.stack(o_data, dim=0)
        end_time = time.time()
        print("RNet create {} candidate items\ncost {}s!".format(o_data.size(0), end_time - start_time))
        utils.draw(boxes, self.test_img, "RNet")
        return o_data, o_prior
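
utils.NMS is used throughout these MTCNN stages but not shown. A plausible sketch, assuming each row of boxes is [x1, y1, x2, y2, confidence, ...] and that ismin=True divides the intersection by the smaller box area instead of the union:

import numpy as np

def nms(boxes, threshold, ismin=False):
    # Greedy non-maximum suppression: keep the highest-confidence box and
    # drop the remaining boxes that overlap it more than `threshold`.
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    order = boxes[:, 4].argsort()[::-1]  # highest confidence first
    keep = []
    while order.size > 0:
        best, rest = order[0], order[1:]
        keep.append(best)
        xx1 = np.maximum(boxes[best, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[best, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[best, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[best, 3], boxes[rest, 3])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        denom = (np.minimum(areas[best], areas[rest]) if ismin
                 else areas[best] + areas[rest] - inter)
        order = rest[inter / denom <= threshold]
    return boxes[keep]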
Example #4
def enrich_fixed_time(df, drop_open_row=True, show=False):
    df['fixed_time'] = 0
    dt = np.median(np.diff(-df.quaketime))
    transitions = np.where(
        np.logical_or(
            np.diff(-df.quaketime) > 10 * dt,
            np.diff(-df.quaketime) < -10 * dt))[0]
    transitions = np.concatenate(([-1], transitions, [len(df) - 1]))
    # shift times
    for ti, tf in zip(transitions[:-1], transitions[1:]):
        last = df.quaketime[tf]
        df.loc[ti + 1:tf,
               'fixed_time'] = last + 250e-9 * np.arange(tf - ti, 0, -1)
    # remove first entry of each block
    if drop_open_row:
        for ti in transitions[1:-1][::-1]:
            df.drop(ti + 1, axis=0, inplace=True)
    if show:
        ax = InteractiveFigure().get_axes()
        ax.plot(df.quaketime, color='blue', label='original')
        ax.plot(df.fixed_time, color='green', label='fixed (250 nano)')
        ax.set_title('Time correction', fontsize=14)
        ax.set_xlabel('Sample', fontsize=12)
        ax.set_ylabel('Time to quake', fontsize=12)
        ax.legend()
        utils.draw()
    return df
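
A toy illustration (synthetic numbers, not project data) of the block-boundary detection above: quaketime decreases smoothly within a block, so a boundary shows up wherever the step is far larger than the median step dt:

import numpy as np

quaketime = np.array([1.000, 0.999, 0.998, 0.500, 0.499, 0.498])
dt = np.median(np.diff(-quaketime))  # typical inter-sample step (0.001 here)
boundaries = np.where(np.abs(np.diff(-quaketime)) > 10 * dt)[0]
print(boundaries)  # [2]: the jump sits between samples 2 and 3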
Example #5
def main():
    for train_file, test_file in [
            # ("data/dataset1-a9a-training.txt", "data/dataset1-a9a-testing.txt"),
            ("data/covtype-training.txt", "data/covtype-testing.txt"),
    ]:
        train_data, train_label = load_matrix_from_txt(train_file)
        test_data, test_label = load_matrix_from_txt(test_file)

        for method in ['logistic']:
            _, train_errors, test_errors, losses = sgd(
                train_data,
                train_label,
                test_data,
                test_label,
                method,
                max_iteration=max_iteration,
                step=step)
            draw(train_errors,
                 test_errors,
                 losses,
                 train_file,
                 max_iteration=max_iteration,
                 step=step,
                 lamda=lamda,
                 gamma=gamma)
Example #6
def generate_noiseless(path_data, n_samples, prefix_save, scale_min_dist,
                       scale_max_dist):
    pcd_orig = read_data(path_data)
    pcd_orig.paint_uniform_color([0, 0, 0])  # original points are black

    pcd_all = sample_pcd(pcd_orig, "all", n_samples, scale_min_dist,
                         scale_max_dist)
    write_pcd(pcd_all, f"{prefix_save}all.ply")
    pcd_all.paint_uniform_color([1, 0, 0])

    # pcd_low = sample_pcd(pcd_orig, "low", n_samples, scale_min_dist, scale_max_dist)
    # write_pcd(pcd_low, f"{prefix_save}_low.ply")
    # pcd_low.paint_uniform_color([1, 0, 0])

    pcd_high = sample_pcd(pcd_orig, "high", n_samples, scale_min_dist,
                          scale_max_dist)
    write_pcd(pcd_high, f"{prefix_save}high.ply")
    pcd_high.paint_uniform_color([1, 0, 0])

    translate = 25
    # draw([pcd_orig, pcd_all.translate([translate, 0, 0]), pcd_low.translate([2*translate, 0, 0]), pcd_high.translate([3*translate, 0, 0])])
    draw([
        pcd_orig,
        pcd_all.translate([translate, 0, 0]),
        pcd_high.translate([2 * translate, 0, 0])
    ])
Example #7
def main(config):
    logger = load_logger(config)
    try:
        np.random.seed(config.random_seed)  # reproducibility
        data_original = Dataset(config)
        Net = Net_LSTM

        if config.model == 1:
            Net = Net_LSTM
        elif config.model == 2:
            Net = Net_BidirectionalLSTM

        if config.phase == "train":
            print("The soothsayer will train")
            train_X, valid_X, train_Y, valid_Y = data_original.get_train_and_valid_data()
            train(Net, config, logger, [train_X, train_Y, valid_X, valid_Y])

        if config.phase == "predict":
            print("The soothsayer will predict")
            test_X, test_Y = data_original.get_test_data(return_label_data=True)

            pred_result = predict(Net, config, test_X)  # the output here is normalized prediction data, not yet scaled back
            draw(config, data_original, logger, pred_result)
    except Exception:
        logger.error("Run Error", exc_info=True)
Example #8
def LocalSearch(Position, method='local'):
    num_node = len(Position)
    Route = np.arange(num_node)
    np.random.shuffle(Route)
    ans = CalcDis(Position, Route)
    # init random answer

    plt.ion()
    _, axs = plt.subplots(1)
    draw(Position, Route, axs, 0, ans)

    Count = 1000
    if method == 'annealing':
        Temp0 = 50
        ans_k, Route_k = ans, Route
    ans_list = []
    for i in range(Count):
        if method == 'local':
            ans, Route = Local(Position, ans, Route)
        if method == 'annealing':
            Temp = Temp0 if i == 0 else Temp0 / np.log(1 + i)
            ans_k, Route_k, ans, Route = \
                Anneal(Position, ans_k, Route_k, ans, Route, Temp)
        ans_list.append(ans)
        draw(Position, Route, axs, i, ans)
        plt.pause(0.0001)

    plt.ioff()
    plt.show()

    plt.plot(np.arange(Count), ans_list)
    plt.show()

    return ans, Route
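
The Anneal helper is not shown. A simplified sketch of the Metropolis-style acceptance rule such a helper typically implements (the real Anneal takes and returns more state than this):

import numpy as np

def accept(ans_old, route_old, ans_new, route_new, temp):
    # Always keep an improvement; keep a worse tour with probability
    # exp(-(increase in length) / temp), so exploration shrinks as temp drops.
    if ans_new < ans_old or np.random.random() < np.exp(-(ans_new - ans_old) / temp):
        return ans_new, route_new
    return ans_old, route_old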
Example #9
def apply_themes(args, device, model):
    img_path = os.path.join("photos", args.content_image)

    content_image = Image.open(img_path).resize(eval_size)

    masks = utils.get_masks(content_image, args.seg_threshold)
    filter_ids = utils.select_filters(masks, content_image, total_filters)

    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])

    content_image = content_transform(content_image)
    content_images = content_image.expand(len(filter_ids), -1, -1,
                                          -1).to(device)
    # one forward pass to render themes
    with torch.no_grad():
        if args.load_model is None or args.load_model == "None":
            theme_model = model
        else:
            # our saved models were trained with gpu 3
            #theme_model = torch.load(args.load_model, map_location={'cuda:3': 'cuda:0'})
            theme_model = torch.load(args.load_model)
            theme_model.eval()
        theme_model.to(device)
        output = theme_model(content_images, filter_ids).cpu()

    output_list = []
    for img in output:
        img = img.clone().clamp(0, 255).numpy()
        img = img.transpose(1, 2, 0).astype("uint8")
        output_list.append(img)

    rendered_themes = utils.render_themes(output_list, masks)
    utils.draw(rendered_themes, args.content_image, args.output_image)
Example #10
def main(codes):
    memory = {i: v for i, v in enumerate(codes)}
    read = deque()
    p = program(memory.copy(), read)

    mapa = {}

    i = 0
    j = 0
    start = None
    diri = None
    try:
        while True:
            r = next(p)
            if r == 10:
                i += 1
                j = 0
            else:
                c = chr(r)
                if c == '#':
                    mapa[complex(i, j)] = c
                elif c == '^':
                    mapa[complex(i, j)] = '^'
                    start = complex(i, j)
                    diri = complex(-1, 0)
                j += 1
                # print(c, end='')
    except StopIteration:
        pass

    path = computePath(mapa, start, diri)
    # scaffold(mapa, start, complex(0,-1))
    draw(mapa)

    pathStr = ",".join(map(str, path)) + ","

    # send main routine
    routine, methods = regexp(pathStr)

    # A = ['L', 10, 'L', 8, 'R', 12]
    # B = ['L', 8, 'L', 10, 'L', 6, 'L', 6]
    # C = ['L', 6, 'R', 8, 'R', 12, 'L', 6, 'L', 8]
    # routine = ['C','A','C','B','A','B','A','C','B','A']

    inp = ",".join(routine) + '\n'
    for m in methods:
        inp += m[:-1] + "\n"
    inp += "n\n"

    mem2 = memory.copy()
    mem2[0] = 2
    read2 = deque(map(ord, inp))
    p = program(mem2, read2)
    # while True:
    #     res = next(p)
    #     print('>', res, chr(res))
    *_, res = p
    print(">", res)
Example #11
def pathfinder(algorithm, name, win, win_rows, win_width, line_height):
    pygame.display.set_caption(name)
    grid = utils.make_grid(win_rows, line_height)

    start = None
    end = None

    run = True
    started = False
    while run:
        utils.draw(win, grid, win_rows, win_width, line_height=line_height)
        for event in pygame.event.get():
            if utils.is_quit(event):
                run = False

            if started:
                continue

            if pygame.mouse.get_pressed()[0]:
                pos = pygame.mouse.get_pos()
                row, col = utils.get_clicked_pos(pos, win_rows, win_width,
                                                 line_height)
                if row >= 0 and col >= 0:
                    spot = grid[row][col]
                    if not start and spot != end:
                        start = spot
                        start.make_start()
                    elif not end and spot != start:
                        end = spot
                        end.make_end()
                    elif spot != end and spot != start:
                        spot.make_barrier()
            elif pygame.mouse.get_pressed()[2]:
                pos = pygame.mouse.get_pos()
                row, col = utils.get_clicked_pos(pos, win_rows, win_width,
                                                 line_height)
                if row >= 0 and col >= 0:
                    spot = grid[row][col]
                    spot.reset()
                    if spot == start:
                        start = None
                    elif spot == end:
                        end = None

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE and start and end:
                    for row in grid:
                        for spot in row:
                            spot.update_neighbors(grid)
                    algorithm(
                        lambda: utils.draw(win, grid, win_rows, win_width,
                                           line_height), grid, start, end)

                if event.key == pygame.K_c:
                    start = None
                    end = None
                    grid = utils.make_grid(win_rows, line_height)
Example #12
def main(argv):
    del argv

    if FLAGS.rm:
        os.remove(FLAGS.out)
    else:
        if FLAGS.out in os.listdir('./'):
            logging.fatal(('%s is not empty. Make sure you have'
                           ' archived previously generated data. '
                           'Try --rm flag which will automatically'
                           ' delete previous data.') % FLAGS.out)

    # for reproducing purpose
    # np.random.seed(100)

    trials = FLAGS.trials
    freq = FLAGS.freq
    T = FLAGS.T
    inputnum = FLAGS.inputnum if FLAGS.minimax else 1

    # policies to be compared
    # add your methods here
    policies = [MultiUCB(FLAGS.alpha), LinUCB(FLAGS.alpha_LinUCB, FLAGS.T)]

    for policy in policies:
        logging.info('run policy %s' % policy.name)
        for trial in range(trials):
            if trial % 50 == 0:
                logging.info('trial: %d' % trial)

            minimax_regret = dict()

            for _ in range(inputnum):
                contexts = list(sphere_sampling(3, FLAGS.armnum))
                theta = [1, 0, 0]
                bandit = LinearBandit(contexts, theta)
                agg_regret = dict()
                # initialization
                bandit.init()
                policy.init(contexts)
                rewards = 0
                for t in range(0, T + 1):
                    if t > 0:
                        action = policy.choice(t)
                        reward = bandit.pull_arm(action)
                        policy.update(reward, action)
                        rewards += reward
                    if t % freq == 0:
                        agg_regret[t] = bandit.regret(rewards)
                for t in agg_regret:
                    minimax_regret[t] = max(minimax_regret.get(t, 0),
                                            agg_regret[t])
            # output one trial result into the output file
            write_to_file(dict({policy.name: minimax_regret}))

    # generate the final figure
    draw()
Example #13
def node(turtle_name, indx, step, no_turtles, world):
    x = 0.0
    y_start = indx * (world.window_height / no_turtles)
    y_end = (indx + 1) * (world.window_height / no_turtles)
    turtle = world.spawn(turtle_name, x, y_start)

    draw(turtle, y_start, y_end, world, step)

    world.kill(turtle_name)
Example #14
    def loss(self, input_seq, target):
        output = self(input_seq)

        l2_loss = F.mse_loss(output * 255, target * 255)
        l1_loss = F.l1_loss(output * 255, target * 255)
        from utils import draw
        draw(target, output)

        return l1_loss, l2_loss
Example #15
    def p(self):
        """transform out of tensor to numpy
            filter with confidence
            calculate coordinates
            filter with NMS
            crop image from original image for RNet's input
            draw"""
        r_prior, r_data = [], []  # collect RNet's prior, RNet's input
        coordinates = []  # collect coordinates for draw
        count = 0
        start_time = time.time()
        while min(self.img.size) > 12:
            scal = 0.707**count  # scale factor back to the original image; each 0.707 step halves the area
            input = tf.ToTensor()(self.img).unsqueeze(dim=0) - 0.5
            with torch.no_grad():
                confi, offset = self.pnet(input.cuda())
            W = offset.size(3)  # width of the output feature map
            confi = confi.permute(0, 2, 3, 1)
            confi = confi.reshape(-1).cpu().numpy()
            offset = offset.permute(0, 2, 3, 1)  # move channels last so per-anchor values sit together
            offset = offset.reshape((-1, 14)).cpu().numpy()

            o_index = np.arange(len(offset)).reshape(-1, 1)  # indices over the W_out * H_out feature map
            keep = confi >= 0.9
            offset, o_index, confi = offset[keep], o_index[keep], confi[keep]

            # divmod by W: in the feature map, (x, y) = (remainder, quotient)
            y_index, x_index = divmod(o_index, W)
            # top-left = index * stride (2); bottom-right = top-left + window size (12)
            x1 = x_index * 2 / scal
            y1 = y_index * 2 / scal
            x2 = (x_index * 2 + 12) / scal
            y2 = (y_index * 2 + 12) / scal
            p_prior = np.hstack((x1, y1, x2, y2))  # original-image coordinates as one 2D array
            offset, landmarks = offset[:, :4], offset[:, 4:]
            offset, landmarks = utils.transform(offset, landmarks, p_prior)

            boxes = np.hstack((offset, np.expand_dims(confi, axis=1),
                               landmarks))  # join offsets, confidence and landmarks for NMS
            boxes = utils.NMS(boxes, threshold=0.7, ismin=False)
            coordinates.extend(boxes.tolist())
            if boxes.shape[0] == 0:
                break

            data, prior = utils.crop_to_square(boxes[:, :5], 24, self.image)
            r_prior.extend(prior)
            r_data.extend(data)
            self.img = self.pyramid()  # next level of the image pyramid
            count += 1

        r_prior = np.stack(r_prior, axis=0)  # repack the collected results as numpy array and tensor
        r_data = torch.stack(r_data, dim=0)
        end_time = time.time()
        print("PNet create {} candidate items\ncost {}s!".format(
            r_data.size(0), end_time - start_time))
        utils.draw(np.stack(coordinates, axis=0), self.test_img, "PNet")
        return r_data, r_prior
Example #16
    def plot_masked_signals(self, n_max=None):
        n = len(self.t[:n_max])
        fig, axs = plt.subplots(len(self.masks), len(self.signals))
        for i, mask in enumerate(self.masks):
            for j, sig in enumerate(self.signals):
                ax = axs[i, j]
                ids = np.logical_not(self.masks[mask])[:n_max]
                ax.plot(self.t[:n_max][ids], self.signals[sig][:n_max][ids],
                        'b.' if n < 1e3 else 'b,')
                ids = self.masks[mask][:n_max]
                ax.plot(self.t[:n_max][ids], self.signals[sig][:n_max][ids], 'r.')
                ax.grid()
                if j == 0:
                    ax.set_ylabel('Mask: ' + mask, fontsize=12)
                if i == 0:
                    ax.set_title('Signal: ' + sig, fontsize=12)
                utils.draw()
Example #17
    def action(self, sep_ratio, frame):
        width = frame.shape[1]
        height = frame.shape[0]
        if self.wander_mode:
            print(self.counter)
            pyautogui.keyDown('d')
            self.present_key = 'd'
            draw(frame, 'finding target...', 0, int(frame.shape[1] * 1 / 10))
            self.wander_mode = not self.wander_on(0.2)
            return

        if self.has_target:
            self.counter = 0
            left_lim = int(width / 2 - width * sep_ratio)
            right_lim = int(width / 2 + width * sep_ratio)
            if left_lim <= self.midpoint_x <= right_lim:
                if self.present_key == 'w':
                    pyautogui.keyDown('w')
                    print('forward!')
                else:
                    key_up()
                    pyautogui.keyDown('w')
                    self.present_key = 'w'
                    print('forward')
            elif self.midpoint_x <= left_lim:
                if self.present_key == 'a':
                    pyautogui.keyDown('a')
                    print('turn left!')
                else:
                    key_up()
                    pyautogui.keyDown('a')
                    self.present_key = 'a'
                    print('turn left!')
            elif self.midpoint_x >= right_lim:
                if self.present_key == 'd':
                    pyautogui.keyDown('d')
                    print('turn right!')
                else:
                    key_up()
                    pyautogui.keyDown('d')
                    self.present_key = 'd'
                    print('turn right!')
            else:
                print('error!')
        else:
            key_up()
            self.present_key = None
            self.wander_mode = self.wander_on(2)
Example #18
def test_losers_advantage(client):
    """
        user with the lose_count of 3 and others with that of 0 attempt to
        apply a lottery
        test loser is more likely to win
        target_url: /lotteries/<id>/draw
    """
    users_num = 12

    idx = 1
    win_count = {i: 0 for i in range(1, users_num + 1)}     # user.id -> count

    with client.application.app_context():
        target_lottery = Lottery.query.get(idx)
        index = target_lottery.index

        users = User.query.order_by(User.id).all()[:users_num]
        users[0].lose_count = 3
        user0_id = users[0].id

        add_db(users2application(users, target_lottery))

        token = get_token(client, admin)

        resp = draw(client, token, idx, index)

        for winner_json in resp.get_json():
            winner_id = winner_json['id']
            win_count[winner_id] += 1

        # display info when this test fails
        print("final results of applications (1's lose_count == 3)")
        print(win_count)

        assert win_count[user0_id] > 0
Example #19
def videoplay_loop():
    while True:
        faceDetectInfo = upstream_queue.get()
        frame, faceRectList, faceOrientList, faceIDList, faceNum = faceDetectInfo[:5]
        for i in range(faceNum):
            singleFaceInfo = sInfo.ASF_SingleFaceInfo()
            singleFaceInfo.faceRect = faceRectList[i]
            singleFaceInfo.faceOrient = faceOrientList[i]
            nameLabel = ' '
            if faceIdToName and faceIDList[i] in faceIdToName:
                nameLabel = faceIdToName[faceIDList[i]]
            expressLabel = ' '
            if expressionDict and faceIDList[i] in expressionDict:
                expressLabel = expressionDict[faceIDList[i]]
            # print("expressLabel",expressLabel)
            box = singleFaceInfo.faceRect
            # print('box', box)
            frame = utils.draw(frame, nameLabel, expressLabel, box)

        cv2.imshow('cap', frame)
        key = cv2.waitKey(1) & 0xff
        if key == ord(" "):
            cv2.waitKey(0)
        if key == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #20
def test(dataloader, model_p, writer, train):
    error1 = 0
    error2 = 0
    print('testing!')
    counter = 0
    for batch_idx, sample in enumerate(dataloader):
        diagnosis = sample['diagnosis'][:, :train_time].to(device).float()
        gt_infection = sample['infection'][:, :train_time - 1].to(device).float()
        gt_prediction = sample['infection'][:, train_time - 2:train_time - 2 + sequence_length]
        infection, g0, prediction = model_p(diagnosis)
        infection_tmp = infection.detach()
        error1 += torch.sum(
            torch.abs(infection_tmp[gt_infection != 0] -
                      gt_infection[gt_infection != 0]) /
            (torch.abs(gt_infection[gt_infection != 0]))) / (
                infection.shape[1])
        prediction_tmp = prediction.detach()
        error2 += torch.sum(
            torch.abs(prediction_tmp.float() - gt_prediction.float()) /
            (torch.abs(gt_prediction.float()))) / (prediction.shape[1])
        if batch_idx % 20 == 0:
            for i in range(args.batch_size):
                img = utils.draw(infection[i, :], diagnosis[i, :],
                                 gt_infection[i, :], prediction[i, :],
                                 gt_prediction[i, :], train, i)
                writer.add_figure('img_test', img)
            counter += 1
            if counter == 3:
                break
    return error1 / (20 * 3 * 10), error2 / (20 * 3 * 10)
Example #21
def test(dataloader, model_p):
    print('testing!')
    counter = 0
    for batch_idx, sample in enumerate(dataloader):
        diagnosis = sample['diagnosis'][:, :train_time].to(device).float()
        gt_infection = sample['infection'][:, :train_time - 1].to(device).float()
        gt_prediction = sample['infection'][:, train_time - 2:train_time - 2 + sequence_length]
        infection, g0, prediction = model_p(diagnosis)
        if counter > 300:
            break
        for i in range(args.batch_size):
            counter += 1
            utils.draw(infection[i, :], diagnosis[i, :], gt_infection[i, :],
                       prediction[i, :], gt_prediction[i, :], False, counter)
Example #22
    def predict(self):
        self.net.eval()
        outputs = self.net(self.data)
        outputs = self.__transform__(outputs)
        info = self.__parse__(outputs)
        if len(info) == 0:
            raise Exception("Warning! No boxes at the current threshold!")
        boxes = self.__select__(info.cpu())
        return utils.draw(boxes, self.test_file)
Example #23
    def oneRound():
        current = {}
        for i in bn.topologicalOrder():
            q = gum.Potential(bn.cpt(i))
            for j in bn.parents(i):
                q *= current[j]
            q = q.margSumIn([bn.variable(i).name()])
            v, current[i] = utils.draw(q)
            estimators[i].add(q)
Example #24
def Genetic(Position):
    GroupSize = 500
    num_node = len(Position)
    Routes = []
    Route = np.arange(num_node)
    for i in range(GroupSize):
        np.random.shuffle(Route)
        Routes.append(Route.copy())  # copy; appending `Route` itself would make every row alias the same array
    Routes = np.stack(Routes)
    # init random answer

    plt.ion()
    _, axs = plt.subplots(1)

    AnsAll = Eval(Position, Routes)
    ans = AnsAll.min()
    Route = Routes[AnsAll.argmin()]
    draw(Position, Route, axs, 0, ans)
    ans_list = []
    Count = 1000
    for i in range(Count):
        Routes = Select(Routes, AnsAll, i)
        # select
        Routes = Mating(Routes)
        # mating
        Routes = Variation(Position, Routes, i)
        # variation
        AnsAll = Eval(Position, Routes)
        if AnsAll.min() < ans:
            ans = AnsAll.min()
            Route = Routes[AnsAll.argmin()]
        # evaluate
        draw(Position, Route, axs, i, ans)
        plt.pause(0.0001)
        ans_list.append(ans)

    plt.ioff()
    plt.show()

    plt.plot(np.arange(Count), ans_list)
    plt.show()

    return ans, Route
Example #25
    def o(self):
        """transform out of tensor to numpy
            filter with confidence
            calculate coordinates
            filter with NMS
            draw"""
        data, prior = self.r()
        confi, offset = self.onet(data.cuda())
        confi = confi.data.cpu().numpy().flatten()
        offset = offset.data.cpu().numpy()
        keep = confi >= 0.999
        offset, prior, confi = offset[keep], prior[keep], confi[keep]
        offset, landmarks = offset[:, :4], offset[:, 4:]
        offset, landmarks = utils.transform(offset, landmarks, prior)

        boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks))  # join offsets with confidence and landmarks for NMS
        boxes = utils.NMS(boxes, threshold=0.4, ismin=True)

        print("ONet create {} candidate items".format(boxes.shape[0]))
        utils.draw(boxes, self.test_img, "ONet")
Example #26
    def start(self):
        self.population = self.random_population_init()
        eval_res = self.evaluate(self.population, 0)
        for t in range(self.generations):
            self.population2 = self.select(self.population, eval_res)
            self.population2 = self.mutate(self.population2)
            eval_res = self.evaluate(self.population2, t + 1)
            self.population = self.population2

        print(
            f"------------------------ ITERATION #{self.best[0]} ---------------------------"
        )
        print(
            f"Clique size = {len(self.best[2])}\nNodes in clique -> {sorted(self.best[2])}"
        )
        if self.draw:
            utils.draw(self.graph,
                       f"Best clique found in {self.generations} iterations",
                       self.best[2])
Example #27
def main(codes):
    memory = {i: v for i, v in enumerate(codes)}
    read = deque()
    p = program(memory, read)
    print(memory)

    mapa = {}
    count = 0
    for i in range(50):
        for j in range(50):
            z = complex(i, j)
            read = deque([i, j])
            p = program(memory.copy(), read)
            res = next(p)
            if res == 1:
                mapa[z] = '#'
                count += 1
    draw(mapa)
    print('count', count)
Example #28
def averaged_spectrum(df):
    # find transitions
    dt = np.median(np.diff(-df.quaketime))
    transitions = np.where(
        np.logical_or(
            np.diff(-df.quaketime) > 10 * dt,
            np.diff(-df.quaketime) < -10 * dt))[0] + 1
    transitions = np.concatenate(([0], transitions, [len(df)]))
    print('Distribution of blocks lengths:')
    print(
        utils.dist(
            [tf - ti for ti, tf in zip(transitions[:-1], transitions[1:])]))
    print(f'Valid blocks (4096 samples):\t' +
          f'{np.sum(np.diff(transitions)==4096):.0f}/{len(transitions):d}')
    # calculate ffts
    ffts = []
    for ti, tf in zip(transitions[:-1], transitions[1:]):
        if tf - ti != 4096: continue
        f = np.fft.rfft(df.signal[ti:tf], norm='ortho')[1:]
        ffts.append(np.abs(f))
    # plot averaged fft
    avg_fft = np.mean(ffts, axis=0)
    fig, axs = plt.subplots(3, 1)
    axs[0].plot(avg_fft, linewidth=0.8)
    axs[0].set_ylabel('Averaged Fourier Over Blocks')
    axs[1].plot(ffts[3], linewidth=0.8)
    axs[1].set_ylabel('FFT: Block 3')
    axs[2].plot(ffts[30], linewidth=0.8)
    axs[2].set_ylabel('FFT: Block 30')
    # plot distribution of [max(abs(f)) for f in ffts]
    fig, axs = plt.subplots(2, 1)
    axs[0].plot(
        utils.dist([np.max(f) for f in ffts],
                   np.arange(1001) / 10)[2:])
    axs[0].set_xlabel('Quantile [%]')
    axs[0].set_ylabel('Max Fourier Amplitude in Block')
    axs[1].plot(
        utils.dist([np.argmax(f) for f in ffts],
                   np.arange(1001) / 10)[2:])
    axs[1].set_xlabel('Quantile [%]')
    axs[1].set_ylabel('Frequency of Max Fourier Amplitude')
    utils.draw()
Example #29
def test_draw_invalid(client):
    """attempt to draw non-exsit lottery
        target_url: /lotteries/<id>/draw [POST]
    """
    idx = invalid_lottery_id
    token = get_token(client, admin)

    resp = draw(client, token, idx)

    assert resp.status_code == 404
    assert 'Not found' in resp.get_json()['message']
Example #30
def plot_data(df, cols=None, split_col=None, title='', ax=None):
    # initialization
    mpl.rcParams['agg.path.chunksize'] = int(1e7)
    if ax is None:
        fig, ax = plt.subplots()
    if isinstance(ax, str) and ax == 'interactive':
        ax = InteractiveFigure().get_axes()
    if cols is None:
        cols = df.columns[:2]
    n = df.shape[0]
    double_y = (not isinstance(cols, str)) and len(cols) > 1
    n_samples = utils.counter_to_str(n)
    tit = title + '\n' if title else title
    tit += f'({n_samples:s} samples)'
    shape = '-' if n <= 150e3 else ','

    # first axis
    col = cols[0] if double_y else cols
    ax.plot(np.arange(len(df[col])), df[col], 'b' + shape)
    ax.grid()
    ax.set_title(tit, fontsize=14)
    ax.set_xlabel('Sample', fontsize=12)
    ax.set_ylabel(col, color='b')
    ax.tick_params('y', colors='b')

    # vertical splitting lines
    if split_col:
        M = np.max(np.abs(df[col]))
        ids = np.where(np.diff(df[split_col]) != 0)[0]
        for i in ids:
            ax.plot((i + 1, i + 1), (-M, M), 'k-')

    # second axis
    if double_y:
        ax2 = ax.twinx()
        col = cols[1]
        ax2.plot(np.arange(len(df[col])), df[col], 'r' + shape)
        ax2.set_ylabel(col, color='r')
        ax2.tick_params('y', colors='r')

    utils.draw()
Example #31
    def gibbs_sample_inside_loop_i_embed(self, i_embed, j_prev_assignment=None, anneal_temp=1, i_utt=None):
        """
        Perform the inside loop of Gibbs sampling for data vector `i_embed`.
        """

        # Temp
        # print "j_prev_assignment", j_prev_assignment
        # print self.lm.unigram_counts
        # print self.lm.bigram_counts
        # print

        # Compute log probability of `X[i]` belonging to each component; this
        # is the bigram version of (24.26) in Murphy, p. 843.
        if j_prev_assignment is not None:
            log_prob_z = np.log(self.lm.prob_vec_given_j(j_prev_assignment))
        else:
            log_prob_z = self.lm.log_prob_vec_i()
        # print log_prob_z

        # Scale with language model scaling factor
        log_prob_z *= self.lms
        # print log_prob_z
        if i_utt is not None and i_utt == i_debug_monitor:
            logger.debug("lms * log(P(z=i|z_prev=j)): " + str(log_prob_z))
            logger.debug("log(p(x|z=i)): " + str(self.acoustic_model.components.log_post_pred(i_embed)))

        # Bigram version of (24.23) in Murphy, p. 842
        log_prob_z[:self.acoustic_model.components.K] += self.acoustic_model.components.log_post_pred(i_embed)
        # Empty (unactive) components
        log_prob_z[self.acoustic_model.components.K:] += self.acoustic_model.components.log_prior(i_embed)
        if anneal_temp != 1:
            log_prob_z = log_prob_z - _cython_utils.logsumexp(log_prob_z)
            log_prob_z_anneal = 1./anneal_temp * log_prob_z - _cython_utils.logsumexp(1./anneal_temp * log_prob_z)
            prob_z = np.exp(log_prob_z_anneal)
        else:
            prob_z = np.exp(log_prob_z - _cython_utils.logsumexp(log_prob_z))
        assert not np.isnan(np.sum(prob_z))

        if i_utt is not None and i_utt == i_debug_monitor:
            logger.debug("P(z=i|x): " + str(prob_z))

        # Sample the new component assignment for `X[i]`
        k = utils.draw(prob_z)

        # There could be several empty, unactive components at the end
        if k > self.acoustic_model.components.K:
            k = self.acoustic_model.components.K

        if i_utt is not None and i_utt == i_debug_monitor:
            logger.debug("Adding item " + str(i_embed) + " to acoustic model component " + str(k))
        self.acoustic_model.components.add_item(i_embed, k)

        return k
Example #32
def test_draw_noperm(client):
    """attempt to draw without proper permission.
        target_url: /lotteries/<id>/draw [POST]
    """
    idx = 1
    token = get_token(client, test_user)

    resp = draw(client, token, idx)

    assert resp.status_code == 403
    assert 'You have no permission to perform the action' in \
        resp.get_json()['message']
Example #33
    def gibbs_sample_inside_loop_i(self, i, anneal_temp=1): #, lms=1.):
        """
        Perform the inside loop of Gibbs sampling for data vector `i`.

        This is the inside of `gibbs_sample` and can be used by outside objects
        to perform only the inside loop part of the Gibbs sampling operation.
        The step in the loop is to sample a new assignment for data vector `i`.
        The reason for not replacing the actual inner part of `gibbs_sample`
        with a call to this function is that doing so would not allow caching
        the old component stats.
        """

        # Compute log probability of `X[i]` belonging to each component
        # (24.26) in Murphy, p. 843
        log_prob_z = self.lms * (
            np.ones(self.components.K_max)*np.log(
                float(self.alpha)/self.components.K_max + self.components.counts
                )
            )

        # (24.23) in Murphy, p. 842
        log_prob_z[:self.components.K] += self.components.log_post_pred(i)
        # Empty (unactive) components
        log_prob_z[self.components.K:] += self.components.log_prior(i)
        if anneal_temp != 1:
            log_prob_z = log_prob_z - logsumexp(log_prob_z)
            log_prob_z_anneal = 1./anneal_temp * log_prob_z - logsumexp(1./anneal_temp * log_prob_z)
            prob_z = np.exp(log_prob_z_anneal)
        else:
            prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
        # prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
        assert not np.isnan(np.sum(prob_z))

        # Sample the new component assignment for `X[i]`
        k = utils.draw(prob_z)

        # There could be several empty, unactive components at the end
        if k > self.components.K:
            k = self.components.K

        logger.debug("Adding item " + str(i) + " to acoustic model component " + str(k))
        self.components.add_item(i, k)
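
Across these Gibbs samplers, utils.draw(prob_z) samples a component index from a probability vector. A minimal sketch of such a categorical draw, assuming prob_z is normalized (as it is after the logsumexp step above):

import numpy as np

def draw(prob_z):
    # Sample index k with probability prob_z[k].
    return int(np.random.choice(len(prob_z), p=prob_z))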
Example #34
    def test_read_svgd(self):
        p = Path.read_svgd("../toys/spiral.svgd")
        if draw:
            utils.draw(p[0], scale=0.4)
Example #35
    def test_path(self):
        a = Path()
        a.append_curve(CubicBezier(Point(-7, -3), Point(2, 8), Point(2, 1), Point(-2, 0)))

        self.assertEqual(a.size(), 1)
        self.assertFalse(a.closed())
        self.path(a)

        a.close(True)
        self.assertTrue(a.closed())
        self.path(a)

        a.close(False)
        a.append_curve(LineSegment(a.final_point(), Point(3, 5)))
        self.assertEqual(a.size(), 2)
        self.path(a)

        a.append_SBasis(SBasis(3, 6) * SBasis(1, 0), SBasis(5, 2))
        self.path(a)

        a.append_curve(EllipticalArc(Point(), 1, 2, math.pi / 6, True, True, Point(1, 1)), Path.STITCH_DISCONTINUOUS)
        # Stitching adds new segment
        self.assertEqual(a.size(), 5)

        b = Path()
        for c in a:
            b.append_curve(c)

        # TODO: This fails with STITCH_DISCONTINUOUS, but also does so in C++, so
        # it's either correct behaviour or bug in 2geom
        # ~ self.path(b)

        b.insert(2, LineSegment(b[2 - 1](1), b[2](0)))  # , Path.STITCH_DISCONTINUOUS)
        self.curves_equal(LineSegment(b[2 - 1](1), b[2](0)), b[2])
        # TODO! fails on root finding
        # self.path(b)

        b.set_initial(a[2](1))
        b.set_final(a[3](0))

        a.insert_slice(3, b, 0, b.size())
        self.assertEqual(a.size(), b.size() * 2 - 1)

        for i in range(b.size()):
            self.curves_equal(a[3 + i], b[i])

        # Looks like bug:
        #        A = Path()
        #        A.append_curve( CubicBezier( Point(-7, -3), Point(2, 8), Point(2, 1), Point(-2, 0) ) )
        #        A.append_curve(EllipticalArc(Point(), 1, 2, math.pi/6, True, True, Point(1, 1)), Path.STITCH_DISCONTINUOUS)
        #        print A.roots(0, 1)

        # Roots are [1.0, 2.768305708350847, 3.25], Point at second root is
        # Point (2.32, -0.48)
        # and third root is > 3 - it corresponds to root on closing segment, but A is open,
        # and computing A(3.25) results in RangeError - this might be bug or feature.

        self.path(a.portion(0.232, 3.12))
        self.path(a.portion(interval=Interval(0.1, 4.7)))
        self.path(a.portion(0.232, 3.12).reverse())

        b.clear()
        self.assertTrue(b.empty())

        aa = Path()
        for c in a:
            aa.append_curve(c)

        a.erase(0)
        self.assertEqual(a.size(), aa.size() - 1)
        self.assertAlmostEqual(a(0), aa(1))

        a.erase_last()
        self.assertEqual(a.size(), aa.size() - 2)
        self.assertAlmostEqual(a.final_point(), aa[aa.size() - 2](1))

        a.replace(3, QuadraticBezier(a(3), Point(), a(4)))
        self.assertEqual(a.size(), aa.size() - 2)

        cs = [
            LineSegment(Point(-0.5, 0), Point(0.5, 0)).transformed(
                Rotate(-math.pi / 3 * i) * Translate(Point(0, math.sqrt(3) / 2) * Rotate(-math.pi / 3 * i))
            )
            for i in range(6)
        ]

        hexagon = Path.fromList(cs, stitching=Path.STITCH_DISCONTINUOUS, closed=True)

        if draw:
            utils.draw(hexagon, scale=100)

        # to = 5 because each corner contains one stitching segment
        half_hexagon = Path.fromPath(hexagon, fr=0, to=5)
        if draw:
            utils.draw(half_hexagon, scale=100)

        half_hexagon.replace_slice(1, 5, LineSegment(half_hexagon(1), half_hexagon(5)))
        self.assertEqual(half_hexagon.size(), 2)
        self.assertAlmostEqual(half_hexagon(1.5), Point(0.5, 0))

        half_hexagon.stitch_to(half_hexagon(0))
        self.assertAlmostEqual(half_hexagon(2.5), Point())

        a.start(Point(2, 2))
        a.append_SBasis(SBasis(2, 6), SBasis(1, 5) * SBasis(2, 9))
        self.assertAlmostEqual(a(1), Point(6, 5 * 9))

        l = Path.fromList([QuadraticBezier(Point(6, 5 * 9), Point(1, 2), Point(-2, 0.21))])
        a.append_path(l)
        self.assertAlmostEqual(a.final_point(), l.final_point())

        k = Path.fromList([QuadraticBezier(Point(), Point(2, 1), Point(-2, 0.21)).reverse()])
        k.append_portion_to(l, 0, 0.3)
        self.assertAlmostEqual(l.final_point(), k(0.3))
Example #36
rnd = np.random.multivariate_normal(med, cov, 1)
noised = rnd + np.random.normal(0, 0.2, 8)
noised2 = rnd + np.random.normal(0, 0.2, 8)
noised3 = rnd + np.random.normal(0, 0.2, 8)
start = time.time()
news = np.absolute(decoder.predict(rnd))
variats = np.absolute(decoder.predict(noised))
variats2 = np.absolute(decoder.predict(noised2))
variats3 = np.absolute(decoder.predict(noised3))
end = time.time()
print('took', end - start)


new = utils.clean_ml_out(news[0])
variat = utils.clean_ml_out(variats[0])
variat2 = utils.clean_ml_out(variats2[0])
variat3 = utils.clean_ml_out(variats3[0])
print(utils.draw(new))
#print(utils.draw(variat))
print('DENSITY', new.mean())
merge = np.array([new,variat,variat2,variat3]).reshape((1, 128*4, 20))
mf = utils.np_seq2mid(utils.normalizer(merge[0]))
mf.open('/tmp/tmp.mid', 'wb')
mf.write()
mf.close()
#subprocess.call("/usr/local/bin/timidity -D 0 -R 1000 /tmp/tmp.mid", stdout=FNULL, stderr=FNULL, shell=True)

Example #37
    def gibbs_sample(self, n_iter):
        """
        Perform `n_iter` iterations Gibbs sampling on the FBGMM.

        A record dict is constructed over the iterations, which contains
        several fields describing the sampling process. Each field is described
        by its key and statistics are given in a list which covers the Gibbs
        sampling iterations. This dict is returned.
        """

        # Setup record dictionary
        record_dict = {}
        record_dict["sample_time"] = []
        start_time = time.time()
        record_dict["log_marg"] = []
        record_dict["components"] = []

        # Loop over iterations
        for i_iter in range(n_iter):

            # Loop over data items
            for i in range(self.components.N):

                # Cache some old values for possible future use
                k_old = self.components.assignments[i]
                K_old = self.components.K
                stats_old = self.components.cache_component_stats(k_old)

                # Remove data vector `X[i]` from its current component
                self.components.del_item(i)

                # Compute log probability of `X[i]` belonging to each component
                # (24.26) in Murphy, p. 843
                log_prob_z = (
                    np.ones(self.components.K_max)*np.log(
                        float(self.alpha)/self.components.K_max + self.components.counts
                        )
                    )
                # (24.23) in Murphy, p. 842
                log_prob_z[:self.components.K] += self.components.log_post_pred(i)
                # Empty (unactive) components
                log_prob_z[self.components.K:] += self.components.log_prior(i)
                prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))

                # Sample the new component assignment for `X[i]`
                k = utils.draw(prob_z)

                # There could be several empty, unactive components at the end
                if k > self.components.K:
                    k = self.components.K
                # print prob_z, k, prob_z[k]

                # Add data item X[i] into its component `k`
                if k == k_old and self.components.K == K_old:
                    # Assignment same and no components have been removed
                    self.components.restore_component_from_stats(k_old, *stats_old)
                    self.components.assignments[i] = k_old
                else:
                    # Add data item X[i] into its new component `k`
                    self.components.add_item(i, k)

            # Update record
            record_dict["sample_time"].append(time.time() - start_time)
            start_time = time.time()
            record_dict["log_marg"].append(self.log_marg())
            record_dict["components"].append(self.components.K - 1)

            # Log info
            info = "iteration: " + str(i_iter)
            for key in sorted(record_dict):
                info += ", " + key + ": " + str(record_dict[key][-1])
            info += "."
            logger.info(info)

        return record_dict
Example #38
    sys.stdout.write('running ')
    count = 0
    for t, rank in ts:
        base_results.append(dijkstra_cancel(g, s, t))
        for algorithm in algorithms:
            result = algorithm(g, s, t)
            results[algorithm].append(result)
            if draw_it:
                if len(result) >= 4:
                    search_spaces = result[3]
                    if len(search_spaces) == 2:
                        both = set(search_spaces[0]).intersection(set(search_spaces[1]))
                        search_spaces.append(both)
                else:
                    search_spaces = []
                draw(algorithm.__name__, g, s, [t for (t,r) in ts], search_spaces)
                count += 1
            sys.stdout.write('.')
            sys.stdout.flush()
    sys.stdout.write(' done\n')

    sys.stdout.write("%32s |" % "t")

    for (t, rank) in ts:
        sys.stdout.write("%11d |" % rank)
    sys.stdout.write('\n')
    sys.stdout.write('-' * (34 + len(ts) * 13))
    sys.stdout.write('\n')

    errors = []
    for algorithm in algorithms:
Example #39
        # Train discriminator on generated images
        X = np.concatenate((beat_batch, generated_beats))
        y = np.zeros([2 * BATCH_SIZE, 2])
        y[0:BATCH_SIZE, 1] = 1
        y[BATCH_SIZE:, 0] = 1

        #make_trainable(discriminator, True)
        d_loss = discriminator.train_on_batch(X, y)
        losses["d"].append(d_loss)

        # train Generator-Discriminator stack on input noise to non-generated output class
        noise_tr = np.random.uniform(-1, 1, size=(BATCH_SIZE,8))
        y2 = np.zeros([BATCH_SIZE, 2])
        y2[:, 1] = 1

        #make_trainable(discriminator, False)
        g_loss = GAN.train_on_batch(noise_tr, y2)
        losses["g"].append(g_loss)
        tqdm.write("D loss %f, G loss %f" % (losses["d"][-1], losses["g"][-1]))

train_for_n(nb_epoch=200, BATCH_SIZE=128)
print(losses['d'][-1], losses['g'][-1])

# test
noise = np.random.uniform(-1, 1, size=[10, 8])
print(noise)
generateds = generator.predict(noise)
for generated in generateds:
    print(utils.draw(utils.clean_ml_out(generated)))
Example #40
    def gibbs_sample(self, n_iter):
        """
        Perform `n_iter` iterations Gibbs sampling on the IGMM.

        A record dict is constructed over the iterations, which contains
        several fields describing the sampling process. Each field is described
        by its key and statistics are given in a list which covers the Gibbs
        sampling iterations. This dict is returned.
        """

        # Setup record dictionary
        record_dict = {}
        record_dict["sample_time"] = []
        start_time = time.time()
        record_dict["log_marg"] = []
        record_dict["components"] = []

        # Loop over iterations
        for i_iter in range(n_iter):

            # Loop over data items
            # import random
            # permuted = range(self.components.N)
            # random.shuffle(permuted)
            # for i in permuted:
            for i in range(self.components.N):

                # Cache some old values for possible future use
                k_old = self.components.assignments[i]
                K_old = self.components.K
                stats_old = self.components.cache_component_stats(k_old)

                # Remove data vector `X[i]` from its current component
                self.components.del_item(i)

                # Compute log probability of `X[i]` belonging to each component
                log_prob_z = np.zeros(self.components.K + 1, float)
                # (25.35) in Murphy, p. 886
                log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K])
                # (25.33) in Murphy, p. 886
                log_prob_z[:self.components.K] += self.components.log_post_pred(i)
                # Add one component to which nothing has been assigned
                log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i]
                prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))

                # Sample the new component assignment for `X[i]`
                k = utils.draw(prob_z)
                # logger.debug("Sampled k = " + str(k) + " from " + str(prob_z) + ".")

                # Add data item X[i] into its component `k`
                if k == k_old and self.components.K == K_old:
                    # Assignment same and no components have been removed
                    self.components.restore_component_from_stats(k_old, *stats_old)
                    self.components.assignments[i] = k_old
                else:
                    # Add data item X[i] into its new component `k`
                    self.components.add_item(i, k)

            # Update record
            record_dict["sample_time"].append(time.time() - start_time)
            start_time = time.time()
            record_dict["log_marg"].append(self.log_marg())
            record_dict["components"].append(self.components.K - 1)

            # Log info
            info = "iteration: " + str(i_iter)
            for key in sorted(record_dict):
                info += ", " + key + ": " + str(record_dict[key][-1])
            info += "."
            logger.info(info)

        return record_dict
Example #41
decoder_layer = m.layers[-3]
d = Model(enc_in, m.layers[-1](m.layers[-2](m.layers[-3](enc_in))))

# compile
m.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
m.summary()
# train
m.fit(x_train,
      x_train,
      nb_epoch=5,
      batch_size=256,
      shuffle=True,
      validation_data=(x_test, x_test))

print('Test enc dec')
print(utils.draw(x_train[0]))
encdec = m.predict(np.array([x_train[0]]))
print(x_train[0])
print(encdec[0])
#encdec = np.around(encdec)
#print(utils.draw(encdec[0]))
# encform = e.predict(np.array([x_train[0]]))
# print(encform)
# decform = d.predict(encform)
# decform = np.around(decform)
# print(decform.shape)
# print(utils.draw(decform[0]))



Example #42
print('gru', gru_predictions.shape, 'svc', svc_predictions.shape, 'rf', rf_predictions.shape, 'knn', knn_predictions.shape)
np.savez_compressed(ROOT + '/cache/labels_c0.npz', gru_predictions, svc_predictions, rf_predictions, knn_predictions)

def vote(votes, w):
    sc = {}
    for i, v in enumerate(votes):
        if v not in sc:
            sc[v] = w[i]
        else:
            sc[v] += w[i]
    sorted_x = sorted(sc.items(), key=operator.itemgetter(1))[::-1]
    return sorted_x[0][0]

for i, t in enumerate(c0_themes):
    print(i, 'gru', clmap[gru_predictions[i]], 'svc', clmap[svc_predictions[i]], 'rf', clmap[rf_predictions[i]], 'knn', clmap[knn_predictions[i]])
    # the latin hack, i want my latin king
    if svc_predictions[i] == 3:
        result = vote([gru_predictions[i], svc_predictions[i], rf_predictions[i], knn_predictions[i]],
                      [0.0, 1.0, 0.1, 0.5])
    else:
        result = vote([gru_predictions[i], svc_predictions[i], rf_predictions[i], knn_predictions[i]], [0.7, 1.0, 0.4, 0.7])
    print('vote result', clmap[result])
    collection.update_one({'_id': ids[i]}, {'$set': {'class': result}})
    print(utils.draw(themes[i]))
    # mf = utils.np_seq2mid(themes[i])
    # mf.open('/tmp/tmp.mid', 'wb')
    # mf.write()
    # mf.close()
    # subprocess.call("/usr/local/bin/timidity -D 0 -R 1000 /tmp/tmp.mid", stdout=FNULL, stderr=FNULL, shell=True)

Example #43
    def gibbs_sample(self, n_iter, consider_unassigned=True,
            anneal_schedule=None, anneal_start_temp_inv=0.1,
            anneal_end_temp_inv=1, n_anneal_steps=-1): #, lms=1.0):
        """
        Perform `n_iter` iterations Gibbs sampling on the FBGMM.

        Parameters
        ----------
        consider_unassigned : bool
            Whether unassigned vectors (-1 in `assignments`) should be
            considered during sampling.
        anneal_schedule : str
            Can be one of the following:
            - None: A constant temperature of `anneal_end_temp_inv` is used
              throughout; if `anneal_end_temp_inv` is left at default (1), then
              this is equivalent to not performing annealing.
            - "linear": Linearly take the inverse temperature from
              `anneal_start_temp_inv` to `anneal_end_temp_inv` in
              `n_anneal_steps`. If `n_anneal_steps` is -1 for this schedule,
              annealing is performed over all `n_iter` iterations.
            - "step": Piecewise schedule in which the inverse temperature is
              taken from `anneal_start_temp_inv` to `anneal_end_temp_inv` in
              `n_anneal_steps` steps (annealing will be performed over all
              `n_iter` iterations; it might be worth adding an additional
              variable for this case to allow the step schedule to stop early).

        Return
        ------
        record_dict : dict
            Contains several fields describing the sampling process. Each field
            is described by its key and statistics are given in a list which
            covers the Gibbs sampling iterations.
        """

        # Setup record dictionary
        record_dict = {}
        record_dict["sample_time"] = []
        start_time = time.time()
        record_dict["log_marg"] = []
        record_dict["log_prob_z"] = []
        record_dict["log_prob_X_given_z"] = []
        record_dict["anneal_temp"] = []
        record_dict["components"] = []

        # Setup annealing iterator
        if anneal_schedule is None:
            get_anneal_temp = iter([])
        elif anneal_schedule == "linear":
            if n_anneal_steps == -1:
                n_anneal_steps = n_iter
            anneal_list = 1./np.linspace(anneal_start_temp_inv, anneal_end_temp_inv, n_anneal_steps)
            get_anneal_temp = iter(anneal_list)
        elif anneal_schedule == "step":
            assert not n_anneal_steps == -1, (
                "`n_anneal_steps` of -1 not allowed for step annealing schedule"
                )
            n_iter_per_step = int(round(float(n_iter)/n_anneal_steps))
            anneal_list = np.linspace(anneal_start_temp_inv, anneal_end_temp_inv, n_anneal_steps)
            anneal_list = 1./anneal_list
            anneal_list = np.repeat(anneal_list, n_iter_per_step)
            get_anneal_temp = iter(anneal_list)

        # Loop over iterations
        for i_iter in range(n_iter):

            # Get anneal temperature
            anneal_temp = next(get_anneal_temp, anneal_end_temp_inv)

            # Loop over data items
            for i in range(self.components.N):

                # Cache some old values for possible future use
                k_old = self.components.assignments[i]
                if not consider_unassigned and k_old == -1:
                    continue
                K_old = self.components.K
                stats_old = self.components.cache_component_stats(k_old)

                # Remove data vector `X[i]` from its current component
                self.components.del_item(i)

                # Compute log probability of `X[i]` belonging to each component
                # (24.26) in Murphy, p. 843
                log_prob_z = self.lms * (
                    np.ones(self.components.K_max)*np.log(
                        float(self.alpha)/self.components.K_max + self.components.counts
                        )
                    )
                # (24.23) in Murphy, p. 842
                log_prob_z[:self.components.K] += self.components.log_post_pred(i)
                # Empty (unactive) components
                log_prob_z[self.components.K:] += self.components.log_prior(i)
                if anneal_temp != 1:
                    log_prob_z = log_prob_z - logsumexp(log_prob_z)
                    log_prob_z_anneal = 1./anneal_temp * log_prob_z - logsumexp(1./anneal_temp * log_prob_z)
                    prob_z = np.exp(log_prob_z_anneal)
                else:
                    prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
                # prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))

                # Sample the new component assignment for `X[i]`
                k = utils.draw(prob_z)

                # There could be several empty, unactive components at the end
                if k > self.components.K:
                    k = self.components.K
                # print prob_z, k, prob_z[k]

                # Add data item X[i] into its component `k`
                if k == k_old and self.components.K == K_old:
                    # Assignment same and no components have been removed
                    self.components.restore_component_from_stats(k_old, *stats_old)
                    self.components.assignments[i] = k_old
                else:
                    # Add data item X[i] into its new component `k`
                    self.components.add_item(i, k)

            # Update record
            record_dict["sample_time"].append(time.time() - start_time)
            start_time = time.time()
            record_dict["log_marg"].append(self.log_marg())
            record_dict["log_prob_z"].append(self.log_prob_z())
            record_dict["log_prob_X_given_z"].append(self.log_prob_X_given_z())
            record_dict["anneal_temp"].append(anneal_temp)
            record_dict["components"].append(self.components.K)

            # Log info
            info = "iteration: " + str(i_iter)
            for key in sorted(record_dict):
                info += ", " + key + ": " + str(record_dict[key][-1])
            logger.info(info)

        return record_dict
Example #44
        def drawSample():
            return samples[draw(dist)]
Example #45
    def drawExample():
        return examples[draw(distr)]