Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('img1', help='Image1 - use magnitude')
    parser.add_argument('img2', help='Image2 - use phase')

    args = parser.parse_args()

    images = (args.img1, args.img2)
    for img in images:
        try:
            with open(img, 'rb'):
                pass
        except IOError:
            print("%s could not be opened" % img, file=sys.stderr)
            return 1

    img1, img2 = image2array(images[0]), image2array(images[1])

    if img1.shape != img2.shape:
        print("Images should have the same dimensions", file=sys.stderr)
        return 2

    G, H = fft(img1), fft(img2)

    magG = get_magnitude(G)
    phaseH = get_phase(H)

    K = image_from_mag_phase(magG, phaseH)

    img3 = fft(K, True)
    plot([img1, img2, prepare_show(img3, False)])

    return 0
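The magnitude/phase helpers used above are not shown; a minimal sketch of what they might look like with NumPy (hypothetical implementations inferred from the calls above, not the project's actual utils):

import numpy as np

def get_magnitude(F):
    # modulus of each complex frequency coefficient
    return np.abs(F)

def get_phase(F):
    # argument (angle) of each complex frequency coefficient
    return np.angle(F)

def image_from_mag_phase(mag, phase):
    # recombine a magnitude spectrum with a phase spectrum
    return mag * np.exp(1j * phase)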
Example #2
def main(args):
    """
    Plot job execution times.
    """
    with open(args.fconfig, 'rb') as fp:
        args_fconfig = utils.dict_to_class(json.load(fp))
    utils.plot(args_fconfig)
    return None
Example #3
    def plot_results(self):
        """
        Plots acquisition results in a new window
        """
        from utils import plot

        # note: calling .encode("utf-8") here would embed "b'...'" in the
        # filename under Python 3, so format the string directly
        rr_file = "{}.rr.txt".format(self.acquisition_path)
        tag_file = "{}.tag.txt".format(self.acquisition_path)
        plot(rr_file, tag_file)
Example #4
def runLR(x, truth, show=False, X=None, truthX=None):

    w = train(x, truth)
    # print("w =", w)

    prediction = np.sign(np.dot(x, w))

    green = x[(prediction == 1), 1:]
    red = x[(prediction < 1), 1:]
    plot(green, red, w, show=show, axis=312 if X is None else 323)

    right = x[(prediction == truth), 1:]
    wrong = x[(prediction != truth), 1:]
    plot(right, wrong, w, show=show, axis=313 if X is None else 325)
    wrongIn = wrong[:, 0].size

    if X is not None:
        predictionOut = np.sign(np.dot(X, w))

        green = X[(predictionOut == 1), 1:]
        red = X[(predictionOut < 1), 1:]
        plot(green, red, w, show=show, axis=324)

        right = X[(predictionOut == truthX), 1:]
        wrong = X[(predictionOut != truthX), 1:]
        plot(right, wrong, w, show=show, axis=326)

    return wrongIn, wrong[:, 0].size, w
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('img', help=('Path of the image to perform the'
                        ' transform'))
    parser.add_argument('--rmin', type=int, help='rmin parameter', default=23)
    parser.add_argument('--rmax', type=int, help='rmax parameter', default=48)

    args = parser.parse_args()

    try:
        with open(args.img, 'rb'):
            pass
    except IOError:
        print("%s could not be opened" % args.img, file=sys.stderr)
        return 1

    rmin_range = (0, 100)
    if not (rmin_range[0] < args.rmin < rmin_range[1]):
        print("rmin should be between %d and %d" % rmin_range, file=sys.stderr)
        return 2

    if not (rmin_range[0] < args.rmax < rmin_range[1]):
        print("rmax should be between %d and %d" % rmin_range, file=sys.stderr)
        return 2

    if args.rmin >= args.rmax:
        print('rmin should be less than rmax', file=sys.stderr)
        return 3

    aimg = image2array(args.img)
    freq_ = fft(aimg)
    shift_freq = fftshift(freq_)

    # integer division so the center is a valid pixel index
    center = (shift_freq.shape[0] // 2, shift_freq.shape[1] // 2)

    distance_array = euclidean_distance(shift_freq, center)

    # zero everything outside the band rmin <= r <= rmax (band-pass mask)
    mask = (distance_array < args.rmin) | (distance_array > args.rmax)

    shift_freq_cpy = shift_freq.copy()

    shift_freq_cpy[mask] = 0

    output = fft(fftshift(shift_freq_cpy, True), True)

    plot([aimg, prepare_show(shift_freq), prepare_show(shift_freq_cpy),
          prepare_show(output, False)])

    return 0
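The euclidean_distance helper assumed above could look roughly like this (a sketch, assuming it returns each pixel's distance from the given center):

import numpy as np

def euclidean_distance(array, center):
    # distance of every (row, col) pixel from the center point
    rows, cols = np.indices(array.shape)
    return np.hypot(rows - center[0], cols - center[1])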
Example #6
def main():
    p = argparse.ArgumentParser()
    p.add_argument("-i", "--interactive", action="store_true")
    kwargs = vars(p.parse_args())
    plt.rcParams["interactive"] = kwargs["interactive"]
    with utils.plot(__file__):
        plot(**kwargs)
Example #7
def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()  # <-- less elegant but reduces compile time

    prior_params = softplus_list(params[0])
    print('iter %2d: var_f=%1.2f len_f=%1.2f, nlml=%2.2f' %
          (i, prior_params[0], prior_params[1], neg_log_marg_lik))

    if plot_intermediate:
        plot(mod, i)

    return opt_update(i, gradients, state)
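softplus_list above presumably applies the softplus transform, softplus(x) = log(1 + exp(x)), to each hyperparameter; a plausible sketch (an assumption, the actual helper is not shown):

import numpy as np

def softplus(x):
    # numerically stable log(1 + exp(x))
    return np.log1p(np.exp(x))

def softplus_list(params):
    # map softplus over a list of (arrays of) hyperparameters
    return [softplus(p) for p in params]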
Example #8
    def train(self):
        rewardperep = []
        for i in range(self.episode):
            state = self.state_to_index(self.env.reset())
            totalreward = 0
            for j in range(self.horizon):
                action = self.act(state, deterministic=False)
                next_state, reward, done, info = self.env.step(action)
                next_state = self.state_to_index(next_state)
                self.update(state, action, reward, next_state)
                totalreward += reward
                state = next_state
                if done:
                    break
            rewardperep.append(totalreward)

        plot(rewardperep, 'results/qlambdalearning_traffic_returns.png')
Example #9
def train(gan, datasets):
    real_example = gan.input()
    latent = gan.sample()
    fake_example = gan.generator(latent)
    real_logits = gan.discriminator(real_example)
    fake_logits = gan.discriminator(fake_example)

    predict = tf.less_equal((fake_logits), 0.5)
    # p_label = tf.argmax(false_labels,1,output_type=tf.int32)
    acc = tf.reduce_mean(tf.cast(predict, tf.float32))
    tf.summary.scalar('acc', acc)

    g_loss = gan.compute_G_loss(fake_logits,
                                tf.ones(shape=[gan.batch_size, 1]))
    # d_loss = gan.compute_D_loss(real_logits,tf.ones(shape=[gan.batch_size,1]))+\
    #     gan.compute_D_loss(fake_logits,tf.zeros(shape=[gan.batch_size,1]))
    fake_d_loss = gan.compute_fake_D_loss(fake_logits,
                                          tf.zeros_like(fake_logits))

    real_d_loss = gan.compute_real_D_loss(real_logits,
                                          tf.ones_like(real_logits))
    d_loss = fake_d_loss + real_d_loss
    d_op, g_op = gan.train_op(g_loss=g_loss, d_loss=d_loss)
    summary_op = tf.summary.merge_all()
    writer = tf.summary.FileWriter(FLAGS.ckpt, graph=tf.get_default_graph())
    saver = tf.train.Saver()

    global_step = tf.train.get_or_create_global_step()
    global_step = tf.assign_add(global_step, 1)
    init_op = tf.global_variables_initializer()
    train_op = [d_op, g_op]
    with tf.Session() as sess:
        # restore from a checkpoint if available, otherwise initialize (once,
        # not inside the training loop)
        utils.load_or_initial_model(sess, FLAGS.ckpt, saver, init_op)
        a = 0
        G_step = 0
        D_step = 0
        i = 0
        if not os.path.exists('out/'):
            os.makedirs('out/')
        for step in range(FLAGS.max_steps):
            data = datasets.next_batch(gan.batch_size)
            feed_dict = {real_example: data[0]}
            a, loss_d, loss_g, _, g_step, summary_str = \
                sess.run([acc, d_loss, g_loss, train_op, global_step,
                          summary_op], feed_dict=feed_dict)
            D_step += 1
            G_step += 1
            if g_step % 1000 == 0:
                print(g_step, loss_d, loss_g, a, D_step, G_step)
                samples = sess.run(fake_example)
                samples = samples[:16]
                writer.add_summary(summary_str, global_step=g_step)
                # saver.save(sess, FLAGS.ckpt, global_step=g_step)
                fig = utils.plot(samples)
                plt.savefig('out/{}.png'.format(str(i).zfill(3)),
                            bbox_inches='tight')
                i += 1
                plt.close(fig)
Example #10
def pso_svm(data):
    # initialize particle positions, personal/global bests, and velocities
    particle_position_vector = np.array([
        np.array([random.random() * 10,
                  random.random() * 10]) for _ in range(args.n_particles)
    ])
    # copy so updating positions later does not overwrite the personal bests
    pbest_position = particle_position_vector.copy()
    pbest_fitness_value = np.array(
        [float('inf') for _ in range(args.n_particles)])
    gbest_fitness_value = np.array([float('inf'), float('inf')])
    gbest_position = np.array([float('inf'), float('inf')])
    velocity_vector = [np.array([0, 0]) for _ in range(args.n_particles)]
    iteration = 0

    while iteration < args.n_iterations:
        plot(particle_position_vector)
        for i in range(args.n_particles):
            fitness_candidate = fitness_function(particle_position_vector[i],
                                                 data)
            print("error of particle-", i, "is (training, test)",
                  fitness_candidate, " At (gamma, c): ",
                  particle_position_vector[i])

            if pbest_fitness_value[i] > fitness_candidate[1]:
                pbest_fitness_value[i] = fitness_candidate[1]
                pbest_position[i] = particle_position_vector[i]

            if gbest_fitness_value[1] > fitness_candidate[1]:
                gbest_fitness_value = fitness_candidate
                gbest_position = particle_position_vector[i]
            elif (gbest_fitness_value[1] == fitness_candidate[1]
                  and gbest_fitness_value[0] > fitness_candidate[0]):
                gbest_fitness_value = fitness_candidate
                gbest_position = particle_position_vector[i]

        for i in range(args.n_particles):
            new_velocity = (
                args.W * velocity_vector[i]) + (args.c1 * random.random()) * (
                    pbest_position[i] - particle_position_vector[i]
                ) + (args.c2 * random.random()) * (gbest_position -
                                                   particle_position_vector[i])
            new_position = new_velocity + particle_position_vector[i]
            particle_position_vector[i] = new_position

        iteration = iteration + 1
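The snippet depends on an external fitness_function; one plausible sketch with scikit-learn, treating a particle as (gamma, C) for an RBF SVM and returning (training error, test error) as the code above expects (hypothetical; the project's actual function and data layout are not shown):

from sklearn.svm import SVC

def fitness_function(position, data):
    # assumed layout: data = (X_train, y_train, X_test, y_test)
    X_train, y_train, X_test, y_test = data
    gamma, C = position
    clf = SVC(kernel='rbf', gamma=gamma, C=C)
    clf.fit(X_train, y_train)
    train_err = 1.0 - clf.score(X_train, y_train)
    test_err = 1.0 - clf.score(X_test, y_test)
    return train_err, test_err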
Example #11
def train(model, x_train_data, y_train_data, x_test_data, y_test_data):
    verbose = 0
    if DEBUG >= 2:
        model.summary()
        verbose = 1
    history = model.fit(x_train_data,
                        y_train_data,
                        epochs=EPOCHS,
                        batch_size=BATCH_SIZE,
                        validation_split=0.05,
                        verbose=verbose,
                        shuffle=True)
    import utils
    if DEBUG >= 3:
        if args['save_config']:
            utils.plot(history.history, 'train.png')
        else:
            utils.plot(history.history)
    model.reset_states()
    prediction = model.predict(x_test_data)

    log = False
    if DEBUG >= 1:
        log = True
    under, over, avg_err = utils.predict_result(prediction, y_test_data, log)
    # print('{0}>{1}, {2}<{3}, {4:.3f}<0.02'.format(under, len(prediction)/2, over, len(prediction)/5, abs(avg_err)))
    err = float(np.max(y_test_data)) * 0.006
    total = len(y_test_data)
    result = {
        'under': under,
        'under_r': under * 100 / total,
        'over': over,
        'over_r': over * 100 / total,
        'avg_err': avg_err
    }
    if result['under_r'] > 60 and result['over_r'] < 20 and abs(avg_err) < err:
        model.save('{0}_lstm_b{1}p{2}_{3:.1f}_{4:.1f}_{5:.3f}.h5'.format(
            name, BLOCK_SIZE, PREDICT_PERIOD, result['under_r'],
            result['over_r'], avg_err))
    if DEBUG >= 3:
        if args['save_config']:
            utils.plot_predict(prediction, y_test_data, 'predict.png')
        else:
            utils.plot_predict(prediction, y_test_data)
    return result
Example #12
    def __call__(self,
                 modelname,
                 split_rate=.9,
                 seq_length=30,
                 batch_size=8,
                 num_layers=2):

        train_size = int(self.prices.train_size * split_rate)
        X = self.prices.X[train_size:train_size + 300, :]
        X = torch.unsqueeze(torch.from_numpy(X).float(), 1)
        X_test, Y_test = utils.data_process(X, X.shape[0], seq_length)
        model = torch.load(modelname + '.model')
        model.eval()
        loss_fn = nn.MSELoss()
        with torch.no_grad():
            loss_sum = 0
            Y_pred = model(X_test[:, :batch_size, :])
            Y_pred = torch.squeeze(Y_pred[num_layers - 1, :, :])
            for i in range(batch_size, X_test.shape[1], batch_size):
                y = model(X_test[:, i:i + batch_size, :])
                y = torch.squeeze(y[num_layers - 1, :, :])
                Y_pred = torch.cat((Y_pred, y))

                loss = loss_fn(Y_test[i:i + batch_size, :], y)
                loss_sum += loss.item()

        print(loss_sum)
        Y_pred.resize_(Y_pred.shape[0] * Y_pred.shape[1])
        Y_test.resize_(Y_test.shape[0] * Y_test.shape[1])

        my_pred = pd.DataFrame(columns=['pred', 'actual'])

        my_pred['pred'] = Y_pred.numpy()
        my_pred['actual'] = Y_test.numpy()

        #/content/drive/My Drive/abc/

        my_pred.to_csv('Stock_rnn.csv', sep=',', encoding='utf-8')

        utils.plot([Y_pred.shape[0], Y_test.shape[0]],
                   [Y_pred.numpy(), Y_test.numpy()], ['blue', 'red'],
                   'Time (Days)', 'Price',
                   'Sample ' + modelname + ' Price Result',
                   ['Prediction', 'Ground Truth'])
Example #13
    def __call__(self,
                 model_name,
                 hidden_size=128,
                 seq_length=30,
                 split_rate=.9,
                 batch_size=8,
                 num_epochs=500,
                 num_layers=2):

        train_size = int(self.prices.train_size * split_rate)
        X = torch.unsqueeze(
            torch.from_numpy(self.prices.X[:train_size, :]).float(), 1)
        X_train, Y_train = utils.data_process(X, train_size, seq_length)

        if model_name == 'LSTM':
            model = SimpleLSTM(self.window_size,
                               hidden_size,
                               num_layers=num_layers)
        else:
            model = SimpleGRU(self.window_size,
                              hidden_size,
                              num_layers=num_layers)

        loss_fn = nn.MSELoss()
        optimizer = optim.Adam(model.parameters())
        loss_plt = []

        for epoch in range(num_epochs):
            loss_sum = 0
            for i in range(0, X_train.shape[1] - batch_size, batch_size):
                Y_pred = model(X_train[:, i:i + batch_size, :])
                Y_pred = torch.squeeze(Y_pred[num_layers - 1, :, :])
                loss = loss_fn(Y_train[i:i + batch_size, :], Y_pred)
                loss_sum += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            print('epoch [%d] finished, Loss Sum: %f' % (epoch, loss_sum))
            loss_plt.append(loss_sum)

        torch.save(model, model_name + '.model')
        utils.plot([len(loss_plt)], [np.array(loss_plt)], 'black', 'Epoch',
                   'Loss Sum', 'MSE Loss Function')
Example #14
def main():
    EPOCH = 10
    MIN_FREQ = 5
    SHUFFLE = True

    trn_texts = open("trn.data").read().strip().split("\n")
    trn_labels = open("trn.label").read().strip().split("\n")
    dev_texts = open("dev.data").read().strip().split("\n")
    dev_labels = open("dev.label").read().strip().split("\n")

    print('perceptron')
    print('-' * 40)
    print('trn data size:', len(trn_texts))
    print('dev data size:', len(dev_texts))

    bag_of_words = BagOfWords(True, True, MIN_FREQ)
    trn_data = bag_of_words.fit_transform(trn_texts, trn_labels)
    dev_data = bag_of_words.transform(dev_texts)

    print('min vocabulary freq:', MIN_FREQ)
    print('vocabulary size:', len(trn_data[0]))
    print('shuffle after epoch:', SHUFFLE)

    perceptron = Perceptron(bag_of_words)

    print('training start\n')
    start = time()
    print('data accurary')
    print_cells(['epoch', 'trn', 'dev'], 9)
    print('-' * 30)

    trn_acc = []
    dev_acc = []

    def epoch_callback(i):
        trn_acc.append(perceptron.accuracy(trn_data, trn_labels))
        dev_acc.append(perceptron.accuracy(dev_data, dev_labels))
        print_cells([i, trn_acc[i], dev_acc[i]], 9)

    perceptron.fit(trn_data, trn_labels, EPOCH, SHUFFLE, epoch_callback)

    print('\ntraining end')
    print('duration:', round(time() - start))

    plot(list(range(0, EPOCH)), trn_acc, dev_acc, 'Perceptron')
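A minimal sketch of a plot helper matching this call signature (epoch indices, training and dev accuracy curves, and a title; assumed, not the original helper):

import matplotlib.pyplot as plt

def plot(epochs, trn_acc, dev_acc, title):
    plt.plot(epochs, trn_acc, label='train')
    plt.plot(epochs, dev_acc, label='dev')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title(title)
    plt.legend()
    plt.show()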
Example #15
def main():

    # For custom use, rewrite this section with your own data
    ###################################################################
    taskname = "SUMMARIZATION"
    HJ, model_probs, labels, length_list = get_data()
    ###################################################################

    HUSE, HUSEQ, HUSED = calculate_HUSE(HJ, model_probs, labels, length_list)

    # OUTPUT
    print("For the task of {}".format(taskname))
    print("Overall HUSE score is: {}".format(HUSE))
    print("HUSE-Q (just human) score is: {}".format(HUSEQ))
    print("HUSE-D score is: {}".format(HUSED))

    # Plot saved to {taskname}.pdf
    plot(taskname, HJ, model_probs, labels, length_list)
Example #16
def p01e(train_path, eval_path, pred_path):
    """Gaussian discriminant analysis

    Args:
        train_path: path to csv file for training data
        eval_path: path to csv file for validation data
        pred_path: path to save predictions
    """

    x_train, y_train = utils.load_dataset(train_path, add_intercept=False)
    model = GaussianDiscriminantAnalysis()
    model.fit(x_train, y_train)
    x_val, y_val = utils.load_dataset(eval_path, add_intercept=False)
    y_pred = model.predict(x_val)
    utils.plot(x_val, y_val, model.theta, "{}.png".format(pred_path))

    # Use np.savetxt to save outputs from validation set to pred_path
    np.savetxt(pred_path, y_pred)
Example #17
def p01b(train_path, eval_path, pred_path):
    """Logistic regression with Newton's Method

    Args:
        train_path: Path to CSV file containing dataset for training.
        eval_path: Path to CSV file containing dataset for evaluation.
        pred_path: Path to save predictions.
    """
    # Train classifier
    x_train, y_train = utils.load_dataset(train_path, add_intercept=True)
    model = LogisticRegression(eps=1e-5)
    model.fit(x_train, y_train)

    # Validate classifier
    x_val, y_val = utils.load_dataset(eval_path, add_intercept=True)
    y_pred = model.predict(x_val)
    utils.plot(x_val, y_val, model.theta, "{}.png".format(pred_path))
    np.savetxt(pred_path, y_pred)
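This example and the previous one call utils.plot(x, y, theta, path); a sketch of such a helper that scatters the two classes and draws the linear decision boundary theta^T x = 0 (assuming x carries the intercept in column 0, which the add_intercept flag above suggests):

import numpy as np
import matplotlib.pyplot as plt

def plot(x, y, theta, save_path):
    plt.figure()
    plt.plot(x[y == 1, 1], x[y == 1, 2], 'bx')  # positive class
    plt.plot(x[y == 0, 1], x[y == 0, 2], 'go')  # negative class
    # boundary: theta[0] + theta[1]*x1 + theta[2]*x2 = 0
    x1 = np.linspace(x[:, 1].min(), x[:, 1].max(), 100)
    x2 = -(theta[0] + theta[1] * x1) / theta[2]
    plt.plot(x1, x2, 'r-')
    plt.savefig(save_path)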
Example #18
def draw_mobility(countries):
    FEATURES_VALUES = utils.features_values()
    value_vars = utils.MOBILITY
    data = FEATURES_VALUES.melt(id_vars=['Date', 'CountryName'],
                                value_vars=value_vars,
                                var_name='Measures',
                                value_name="Value")
    data['Measures'] = data.apply(pretty_name, axis=1)

    for country in countries:
        utils.plot('line',
                   data.loc[data['CountryName'] == country],
                   'data_visualization_mobility_' + country,
                   'Date',
                   'Value',
                   'Variation of activity [%]',
                   hue='Measures',
                   legend_pos=None)
Example #19
def plot_motion_z_position(pool_physics, request):
    show_plots, save_plots = request.config.getoption(
        '--show-plots'), request.config.getoption('--save-plots')
    if not (show_plots or save_plots):
        yield
        return
    from utils import plot_ball_motion as plot
    yield
    test_name = '_'.join(
        [request.function.__name__, pool_physics.ball_collision_model])
    plot(0,
         pool_physics,
         title=test_name + " ($z$ position)",
         coords=(2, ),
         collision_depth=1,
         filename=os.path.join(PLOTS_DIR, test_name +
                               '-z.png') if save_plots else None,
         show=show_plots)
Example #20
    def generate(self, sess, feed_dict, index):
        samples = sess.run(self.gen_input, feed_dict=feed_dict)

        fig = utils.plot(samples[0:16, :])
        path = os.path.join(FLAGS.train_dir, 'out/')
        if not os.path.exists(path):
            os.makedirs(path)
        plt.savefig(os.path.join(path, '{}.png'.format(str(index).zfill(3))),
                    bbox_inches='tight')
        plt.close(fig)
Example #21
    def plot_predictions(self, X, filepath=None, title='Predictions'):
        if filepath is None:
            filepath = self.results_dir + '/predictions.png'
        pred = self.predict(X)
        images = np.concatenate([
            np.transpose(X, (1, 0, 2, 3, 4)),
            np.transpose(pred, (1, 0, 2, 3, 4))
        ], axis=0)
        images = np.reshape(
            images,
            (2 * X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4]))
        plot(filepath, title,
             make_mosaic(images,
                         nrows=X.shape[0],
                         ncols=int(2 * X.shape[1]),
                         clip=True))
Example #22
def plot_initial_positions(pool_physics, pool_table, request):
    show_plots, save_plots = request.config.getoption(
        '--show-plots'), request.config.getoption('--save-plots')
    if not (show_plots or save_plots):
        yield
        return
    from utils import plot_motion_timelapse as plot
    yield
    test_name = '_'.join(
        [request.function.__name__, pool_physics.ball_collision_model])
    plot(pool_physics,
         table=pool_table,
         nt=0,
         t_0=0.0,
         t_1=0.0,
         title=test_name + ' (initial positions)',
         filename=os.path.join(PLOTS_DIR, test_name + '-initial-positions.png')
         if save_plots else None,
         show=show_plots)
Example #23
def main():
    for episode in range(NUM_OF_EPISODES):
        game = env.Easy21()
        state = game.get_state()
        action = epsilon_greedy_action(state)
        is_terminal = False

        while not is_terminal:
            new_state, reward, is_terminal = game.step(action)
            new_action = epsilon_greedy_action(new_state)

            # Update the visit count.
            N[state[0], state[1], action] += 1

            # Update the Q value.

            # How much we expect our current Q-value to be wrong. We estimate the true Q
            # value by looking at the obtained reward and the next Q value we would pick.
            error = (reward + get_Q(new_state, new_action)) - get_Q(
                state, action)

            # Step-size for the update. Decreases over time, when we visit the state more and
            # more we are more confident of our value, so we update it less. The fact that alpha
            # will eventually go to zero is essential for convergence of the algorithm toward
            # the optimal policy.
            alpha = 1 / N[state[0], state[1], action]

            # Update the Q values using Sarsa algorithm.
            # Q(S, A) <-- Q(S, A) + alpha*( reward + discount*Q(S', A') - Q(S, A) )
            # Note that discount is 1 in this case.
            Q[state[0], state[1],
              action] = get_Q(state, action) + alpha * error

            # Move to the new state-action pair.
            state = new_state
            action = new_action

        if episode % PRINT_EVERY == 0:
            print(episode)
            #print(Q)
            #utils.plot(Q)

    utils.plot(Q)
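The loop above assumes globals Q and N plus the helpers get_Q and epsilon_greedy_action; one plausible shape for them in Easy21 (state = (dealer card, player sum); the table sizes and the N0 constant are assumptions):

import numpy as np

N0 = 100                       # assumed exploration constant
Q = np.zeros((11, 22, 2))      # state-action values
N = np.zeros((11, 22, 2))      # state-action visit counts

def get_Q(state, action):
    return Q[state[0], state[1], action]

def epsilon_greedy_action(state):
    # epsilon decays as the state is visited more often
    epsilon = N0 / (N0 + N[state[0], state[1], :].sum())
    if np.random.rand() < epsilon:
        return np.random.randint(2)                  # explore
    return int(np.argmax(Q[state[0], state[1], :]))  # exploit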
Example #24
def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    # neg_log_marg_lik, gradients = mod.run()
    neg_log_marg_lik, gradients = mod.run_two_stage()

    prior_params = softplus_list(params[0])
    # print('iter %2d: var1=%1.2f len1=%1.2f om1=%1.2f var2=%1.2f len2=%1.2f om2=%1.2f var3=%1.2f len3=%1.2f om3=%1.2f '
    #       'var4=%1.2f len4=%1.2f var5=%1.2f len5=%1.2f var6=%1.2f len6=%1.2f '
    #       'vary=%1.2f, nlml=%2.2f' %
    #       (i, prior_params[0][0], prior_params[0][1], prior_params[0][2],
    #        prior_params[1][0], prior_params[1][1], prior_params[1][2],
    #        prior_params[2][0], prior_params[2][1], prior_params[2][2],
    #        prior_params[3][0], prior_params[3][1],
    #        prior_params[4][0], prior_params[4][1],
    #        prior_params[5][0], prior_params[5][1],
    #        softplus(params[1]), neg_log_marg_lik))
    # print('iter %2d: len1=%1.2f om1=%1.2f len2=%1.2f om2=%1.2f len3=%1.2f om3=%1.2f '
    #       'var4=%1.2f len4=%1.2f var5=%1.2f len5=%1.2f var6=%1.2f len6=%1.2f '
    #       'vary=%1.2f, nlml=%2.2f' %
    #       (i, prior_params[0][0], prior_params[0][1],
    #        prior_params[1][0], prior_params[1][1],
    #        prior_params[2][0], prior_params[2][1],
    #        prior_params[3][0], prior_params[3][1],
    #        prior_params[4][0], prior_params[4][1],
    #        prior_params[5][0], prior_params[5][1],
    #        softplus(params[1]), neg_log_marg_lik))
    print(
        'iter %2d: len1=%1.2f om1=%1.2f len2=%1.2f om2=%1.2f len3=%1.2f om3=%1.2f '
        'len4=%1.2f len5=%1.2f len6=%1.2f '
        'vary=%1.2f, nlml=%2.2f' %
        (i, prior_params[0][0], prior_params[0][1], prior_params[1][0],
         prior_params[1][1], prior_params[2][0], prior_params[2][1],
         prior_params[3], prior_params[4], prior_params[5], softplus(
             params[1]), neg_log_marg_lik))

    if plot_intermediate:
        plot(mod, i)

    return opt_update(i, gradients, state)
Example #25
def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # draw a random mini-batch of data indices
    batch_ind = np.random.permutation(N)[:N_batch]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run(batch_ind=batch_ind)
    nlml = neg_log_marg_lik * N / N_batch

    print('iter %2d: nlml=%2.2f' %
          (i, nlml))

    if plot_intermediate:
        plot(mod, i)

    return opt_update(i, gradients, state)
Example #26
def main():
    """
	Main function to drive the simulator. The expected usage is:
	./sim.py -c <path to circuit netlist>
	"""

    # set up argument parser
    parser = ArgumentParser()
    parser.add_argument('-c', help='Circuit netlist file', required=True)
    parser.add_argument('-o', help='Output .wav file', default=None)

    try:
        # extract command line arguments
        args = parser.parse_args()
        netlist = Netlist(args.c)
        circuit = netlist.as_circuit()

        # solve the circuit at every timestamp for the input signal
        timescale, input_signal, vout = circuit.transient()
        vout = np.array(vout)
        t_start, t_end = circuit.timescale()

        # write data to output wavfile
        outfile = args.o
        if outfile is not None:
            rate = timescale[1] - timescale[0]
            fs = int(1.0 / rate)
            max_amp = np.max(np.abs(vout))
            sigf32 = (vout / max_amp).astype(np.float32)
            sd.play(sigf32, fs * 100)
            # wavfile.write(outfile, fs, sigf32)

        # plot the results
        plt.subplot(1, 2, 1)
        plot(t_start, t_end, input_signal, title="Input Signal")
        plt.subplot(1, 2, 2)
        plot(t_start, t_end, vout, title="Output Signal")
        plt.show()

    except IOError as e:
        parser.print_help()
        print("\nIOError {}".format(e))
        sys.exit(-1)
Example #27
    def _plot_and_write(plot_dict,
                        loc,
                        x_label="",
                        y_label="",
                        title="",
                        kind='line',
                        legend=True,
                        moving_average=False):
        for key in plot_dict:
            plot(data={key: plot_dict[key]},
                 loc=loc + str(key) + ".pdf",
                 x_label=x_label,
                 y_label=y_label,
                 title=title,
                 kind=kind,
                 legend=legend,
                 index_col=None,
                 moving_average=moving_average)
            write_to_csv(data={key: plot_dict[key]}, loc=loc + ".csv")
Example #28
    def silence(self):
        threshold = [-1*int(10*i) for i in range(1, 11)][::-1]
        precisionArr = []
        recallArr = []
        FscoreArr = []
        for value in threshold:
            print("threshold: {} being evaluated".format(value))
            valueResults = []
            for i, filename in enumerate(self.files):
                print("Executing file {} number {}/{}".format(filename, i+1, len(self.files)), end='\r')
                audio, sr, channels, _, _, _ = std.AudioLoader(filename=filename)()
                audio = np.sum(audio, axis=1)/channels
                _, ret = essStartstopDetector(audio, threshold=value)
                valueResults.append((filename.replace(self.wavDatasetPath, ""), ret))
            print('')
            valueResults = sorted(valueResults, key=lambda x: x[0])
            _, precision, recall = self.evaluateValue(valueResults, "Clicks")
            precisionArr.append(precision)
            recallArr.append(recall)
            FscoreArr.append((1 + Fbeta**2) * precision * recall / (Fbeta**2 * precision + recall))
        u.plot("./results/silencethreshold.png", precision=precisionArr,recall=recallArr, Fscore=FscoreArr, x_values=threshold)

        frameSize = [int(2**i) for i in range(5,10)]
        precisionArr = []
        recallArr = []
        FscoreArr = []
        for value in frameSize:
            print("frameSize: {} being evaluated".format(value))
            valueResults = []
            for i, filename in enumerate(self.files):
                print("Executing file {} number {}/{}".format(filename, i+1, len(self.files)), end='\r')
                audio, sr, channels, _, _, _ = std.AudioLoader(filename=filename)()
                audio = np.sum(audio, axis=1)/channels
                _, ret = essStartstopDetector(audio, frameSize=value, hopSize=value)
                valueResults.append((filename.replace(self.wavDatasetPath, ""), ret))
            print('')
            valueResults = sorted(valueResults, key=lambda x: x[0])
            _, precision, recall = self.evaluateValue(valueResults, "Clicks")
            precisionArr.append(precision)
            recallArr.append(recall)
            FscoreArr.append((1 + Fbeta**2) * precision * recall / (Fbeta**2 * precision + recall))
        u.plot("./results/silenceframeSize.png", precision=precisionArr, recall=recallArr, Fscore=FscoreArr, x_values=frameSize)
Example #29
def summary():
    message = 'Data summary'
    vars = dict(request.vars)
    labels = []
    sizes = []
    sql = ''
    try:
        if 'submit' in vars:
            table_name = vars['table_name']
            data_type = vars['data_type']
            start_date = vars['start']
            end_date = vars['end']
            sql = f'select branch_name, {data_type} from {table_name} where open_date between "{start_date}" and "{end_date}" group by branch_name;'
            table_values = bankdb.executesql(sql)
            labels = [row[0] for row in table_values]
            sizes = [float(row[1]) for row in table_values]
            utils.plot(labels, sizes)
    except Exception as e:
        response.flash = sql + str(e)
        message = sql
    return dict(locals())
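Interpolating request values straight into SQL, as above, is open to injection; a safer variant passes the date bounds as placeholders (a sketch, assuming the DAL's executesql supports a placeholders argument as web2py's does; table and column names cannot be parameterized and still need separate validation, and the %s placeholder style depends on the database driver):

sql = ('select branch_name, {} from {} '
       'where open_date between %s and %s '
       'group by branch_name;'.format(data_type, table_name))
table_values = bankdb.executesql(sql, placeholders=(start_date, end_date))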
Example #30
def draw_death_over_mobility(countries):
    FEATURES_VALUES = utils.features_values()
    value_vars = ['{}_15days'.format(mobility) for mobility in utils.MOBILITY]
    data = FEATURES_VALUES.melt(
        id_vars=['CountryName', 'ConfirmedDeaths', 'R'],
        value_vars=value_vars,
        var_name='Measures',
        value_name="Value")
    data['Measures'] = data.apply(pretty_name, axis=1)

    for country in countries:
        utils.plot('scatter',
                   data.loc[data['CountryName'] == country],
                   'data_visualization_death_to_measure_' + country,
                   'Value',
                   'R',
                   'Rt',
                   x_label='mobility',
                   hue='Measures',
                   legend_pos=None)
Example #31
    def plot_history(self, sid: int = None, show_trades: bool = False):
        """provides a plot of the batl/batb over time"""
        if sid is None:
            sid = max(self.batb_history,
                      key=lambda k: len(self.batb_history[k]))

        fig = plot(sid, self.batb_history[sid], self.batl_history[sid])
        if show_trades:
            fig = plot_trades(fig, self.trade_times[sid],
                              self.trade_price[sid], self.trade_direction[sid])

        return fig
Example #32
def plot_final_positions(pool_physics, pool_table, request):
    show_plots, save_plots = request.config.getoption('--show-plots'), request.config.getoption('--save-plots')
    if not (show_plots or save_plots):
        yield
        return
    from utils import plot_motion_timelapse as plot
    yield
    test_name = '_'.join([request.function.__name__, pool_physics.ball_collision_model])
    events = pool_physics.events
    if events:
        t1 = events[-1].t
        if events[-1].T < float('inf'):
            t1 += events[-1].T
    else:
        t1 = 0.0
    plot(pool_physics, table=pool_table,
         nt=0,
         t_0=t1, t_1=t1,
         title=test_name + ' (final positions)',
         filename=os.path.join(PLOTS_DIR, test_name + '-final-positions.png') if save_plots else None,
         show=show_plots)
Example #34
def main(bee_count, move_count, cities_count):
    all_cities = utils.loadCities(cities_count)

    super_extra_ultra_special_intergalactic_bee = Bee()
    bees = []
    
    for i in range(bee_count):
        bees.append(Bee())

    while super_extra_ultra_special_intergalactic_bee.is_not_complete(all_cities):

        for bee in bees:
            bee.move(move_count, all_cities)

        bees = sorted(bees, key=lambda be: be.distance, reverse=False)
        super_extra_ultra_special_intergalactic_bee = bees[0]

        max_distance = max(bees, key=lambda b: b.distance).distance
        min_distance = min(bees, key=lambda b: b.distance).distance
        middle_distance_difference = (max_distance + min_distance) / 2

        recruiters = []
        for bee in bees:
            if bee.distance > middle_distance_difference:
                bee.change_role(True)
                recruiters.append(bee)
            else:
                bee.change_role(False)

        for bee in bees:
            if not bee.recruiter:
                rndm = rand.uniform(0, 1)
                selected_bee = Bee()
                if rndm < 0.5:
                    selected_bee = recruiters[rand.randrange(0, len(recruiters) - 1)]
                else:
                    selected_bee = super_extra_ultra_special_intergalactic_bee
                bee.replace_cities(selected_bee.visited_cities[:])

    utils.plot(super_extra_ultra_special_intergalactic_bee.visited_cities)
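A minimal sketch of what utils.plot could do with the resulting tour (hypothetical, assuming each city object exposes x and y coordinates):

import matplotlib.pyplot as plt

def plot(cities):
    xs = [c.x for c in cities] + [cities[0].x]  # close the tour loop
    ys = [c.y for c in cities] + [cities[0].y]
    plt.plot(xs, ys, 'o-')
    plt.show()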
Example #35
def runPla(x, f, w=np.zeros((3)), show=False, X=None):

    truth = np.sign(np.dot(x, f))
    green = x[(truth == 1), 1:]
    red = x[(truth < 1), 1:]

    plot(green, red, f, show=show, axis=311)

    right = np.empty([0, 2])
    wrong = np.empty([0, 2])
    count = 0
    while right[:].size < x[:].size:
        prediction = np.sign(np.dot(x, w))
        #print "prediction = " + str(prediction)

        green = x[(prediction == 1), 1:]
        red = x[(prediction < 1), 1:]
        plot(green, red, w, show=show, axis=312)

        right = x[(prediction == truth), 1:]
        wrong = x[(prediction != truth), 1:]
        plot(right, wrong, w, show=show, axis=313)
        if wrong.size == 0:
            break
        w = train(w, x, prediction, truth)
        count += 1
        if show:
            pause(.1)
    return count, w
Example #36
def main():
    warnings.filterwarnings('ignore')
    seed = 1234
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    parser = arg_parser()
    args = parser.parse_args()

    if not osp.exists('../trained_models'):
        os.makedirs('../trained_models')
    if not osp.exists('../predictions'):
        os.makedirs('../predictions')

    if args.model == 'unet':
        model = UNet(3, 4)
    elif args.model == 'resunet_a':
        model = ResUNet_a(3, 4)
    else:
        model = smp.Unet(args.model, encoder_weights='imagenet',
                         classes=4, activation='sigmoid')

    model_trainer = Trainer(model, loss=args.loss)
    model_trainer.start()

    losses = model_trainer.losses
    dice_scores = model_trainer.dice_scores
    iou_scores = model_trainer.iou_scores

    if not osp.exists('../results'):
        os.makedirs('../results')
    plot(losses, name='{}_Loss'.format(args.loss))
    plot(dice_scores, name='Dice_score')
    plot(iou_scores, name='IoU_score')
Example #37
def cond_samples(f, replay_buffer, args, fresh=False):
    if fresh:
        replay_buffer = uncond_samples(f, args, save=False)
    n_it = replay_buffer.size(0) // 100
    all_y = []
    for i in range(n_it):
        x = replay_buffer[i * 100:(i + 1) * 100].to(device)
        y = f.classify(x).max(1)[1]
        all_y.append(y)

    all_y = torch.cat(all_y, 0)
    each_class = [replay_buffer[all_y == l] for l in range(10)]
    print([len(c) for c in each_class])
    for i in range(100):
        this_im = []
        for l in range(10):
            this_l = each_class[l][i * 10:(i + 1) * 10]
            this_im.append(this_l)
        this_im = torch.cat(this_im, 0)
        if this_im.size(0) > 0:
            plot("./save/cond_samples/samples_{}.png".format(i), this_im)
        print(i)
Example #38
    def shouldExplore(self):
        return random.random() <= self.epsilon

if __name__ == "__main__":
    actualActionValues = utils.generateActionValues(10)
    bandit = MultiArmedBandit(10, actualActionValues)
    agent = AgentEpsilonGreedy(bandit, 10, 0.1)
    mse = []
    for _ in range(10000):
        agent.learn()
        mse.append(np.mean(np.square(actualActionValues - agent.actionValueEstimates)))

    print(actualActionValues)
    print(agent.actionValueEstimates)
    utils.plot(mse)

class AgentSoftmax(Agent):

    def __init__(self, bandit, numberOfArms, epsilon, temperature):
        Agent.__init__(self, bandit, numberOfArms)
        self.epsilon = epsilon
        self.temperature = temperature
        self.armSelectionCount = np.zeros(numberOfArms)

    def getAlpha(self, arm):
        return 1.0 / self.armSelectionCount[arm - 1]

    def chooseArm(self):
        choices = [math.exp(q / self.temperature) for q in self.actionValueEstimates]
        totalSum = sum(choices)
        # plausible completion (the original snippet is truncated here):
        # sample an arm in proportion to its softmax weight
        probabilities = [c / totalSum for c in choices]
        return np.random.choice(len(probabilities), p=probabilities)
Example #39
          (d["num_filled"] == num_filled) &
          (d["num_shells"] >= num_shells_range[0]) &
          (d["num_shells"] <= num_shells_range[1]) &
          (d["freq"] == 1.0)]
    num_particles = num_filled * (num_filled + 1)
    energy_type = {"ground": "ground state",
                   "add": "addition",
                   "rm": "removal"}[label]
    fig, ax = plt.subplots()
    fig.set_size_inches((4, 3))
    for method, case in d.groupby("method"):
        case = case.sort_values("num_shells")
        xs = case["num_shells"].astype(int)
        ys = case["energy"]
        ax.plot(xs, ys, "-x", label=utils.METHOD_LABEL[method],
                color=utils.METHOD_COLOR[method])
        ax.get_xaxis().set_major_locator(
            matplotlib.ticker.MaxNLocator(integer=True))
    ax.set_xlabel("K (number of shells)")
    ax.set_ylabel("E (energy)")
    ax.legend()
    fig.tight_layout()
    utils.savefig(fig,
                  "by-num-shells-{num_particles}-{num_filled}-"
                  "{label}-{ml}-{interaction}"
                  .format(**locals()))

with utils.plot(__file__, call=plot) as interactive:
    if not interactive:
        plot("ground", freq=1.0, num_filled=2, num_shells_range=[4, 15])
Example #40
    def get_reliability(self):

        # finding sources
        self.source_finder(image=self.imagename, lsmname=self.poslsm,
                           thresh=self.pos_smooth, **self.opts_pos)

        self.source_finder(image=self.negativeimage, lsmname=self.neglsm,
                           thresh=self.neg_smooth, **self.opts_neg)

        # removing sources within a specified radius
        self.remove_sources_within(catalog=self.poslsm,
                                   rel_excl_src=self.rel_excl_src)
        self.remove_sources_within(catalog=self.neglsm,
                                   rel_excl_src=self.rel_excl_src)

        # add local variance as a parameter
        if self.do_local_var:
            utils.local_variance(self.imagedata, self.header,
                                 catalog=self.poslsm, wcs=self.wcs,
                                 pixelsize=self.pixelsize,
                                 local_region=self.local_var_region,
                                 savefig=False, highvariance_factor=None,
                                 prefix=self.prefix, neg_side=True)

            utils.local_variance(self.imagedata, self.header,
                                 catalog=self.neglsm, wcs=self.wcs,
                                 pixelsize=self.pixelsize,
                                 local_region=self.local_var_region,
                                 savefig=False, highvariance_factor=None,
                                 prefix=self.prefix, neg_side=True)
        # compute correlation only if do_psf_corr = True
        # and the psf is provided
        if self.do_psf_corr and self.psfname:
            utils.psf_image_correlation(
                catalog=self.poslsm, psfimage=self.psfname,
                imagedata=self.imagedata, header=self.header,
                wcs=self.wcs, pixelsize=self.pixelsize,
                corr_region=self.psf_corr_region, prefix=self.prefix)
            utils.psf_image_correlation(
                catalog=self.neglsm, psfimage=self.psfname,
                imagedata=self.imagedata, header=self.header,
                wcs=self.wcs, pixelsize=self.pixelsize,
                corr_region=self.psf_corr_region, prefix=self.prefix)

        # TODO: verbose vs. logging
        pmodel = Tigger.load(self.poslsm, verbose=self.loglevel)
        nmodel = Tigger.load(self.neglsm, verbose=self.loglevel)

        posSources = pmodel.sources
        negSources = nmodel.sources

        npsrc = len(posSources)
        nnsrc = len(negSources)
 
        positive, labels = self.params(posSources, pmodel)
        negative, labels = self.params(negSources, nmodel)

        # setting up a kernel, Gaussian kernel
        bandwidth = []
        for plane in negative.T:
            bandwidth.append(plane.std())

        nplanes = len(labels)
        cov = numpy.zeros([nplanes, nplanes])
        for i in range(nplanes):
            for j in range(nplanes):
                if i == j:
                    cov[i, j] = bandwidth[i] * ((4.0 / ((nplanes + 2) *
                                npsrc)) ** (1.0 / (nplanes + 4.0)))

        pcov = utils.gaussian_kde_set_covariance(positive.T, cov)
        ncov = utils.gaussian_kde_set_covariance(negative.T, cov)

        # get number densities
        nps = pcov(positive.T) * npsrc
        nns = ncov(positive.T) * nnsrc

        # define reliability of positive catalog
        rel = (nps - nns) / nps

        for src, rf in zip(posSources, rel):
            src.setAttribute("rel", rf)
        pmodel.save(self.poslsm)

        if self.makeplots:
            savefig = self.prefix + "_planes.png"
            utils.plot(positive, negative, rel=rel, labels=labels,
                        savefig=savefig, prefix=self.prefix)

        return self.poslsm, self.neglsm
Example #41
sum = 0.0
run = 0.0
while run < runs:
    f = points2weights(np.random.random((2,2)) * 2 - 1)
    x = np.insert(np.random.random((N,2)) * 2 - 1,0,np.ones((1,N)), axis=1)
    testSet = np.insert(np.random.random((N*10,2)) * 2 - 1,0,np.ones((1,N*10)), axis=1)
    
    y = np.sign(np.dot(x,f))
    testTruth = np.sign(np.dot(testSet,f))
    
    w = np.zeros(3)
    
    green = x[(y == 1), 1:]
    red = x[(y != 1), 1:]
    plot(green, red, f, axis=311, show=True)
    count = 0
    
    while count < 1000:
        oldW = np.copy(w)
        element = 0
        while element < N:
            w += lr * gradient(x[element], y[element], w)
            element += 1
        count += 1  # count epochs so the loop terminates even without convergence
        prediction = np.sign(np.dot(x, np.transpose(w)))
        right = x[(prediction == y), 1:]
        wrong = x[(prediction != y), 1:]
        plot(right, wrong, w, show=False, axis=313)  # xlim=[-5,5], ylim=[-5,5]
        # pause(.05)
        if mag(oldW, w) < 0.01:
            break
Example #42
#!/usr/bin/env python3
# Generates ../FigureFiles/fig-by-freq-*.svg from the data files.
import argparse, itertools, os, sys
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import utils

p = argparse.ArgumentParser()
p.add_argument("-i", "--interactive", action="store_true")
p.add_argument("-t", "--title", action="store_true")
p.add_argument("-int", "--interaction", default="normal")
p.add_argument("label", metavar="type", help="ground, add, or rm")
kwargs = vars(p.parse_args())
plt.rcParams["interactive"] = kwargs["interactive"]
with utils.plot(__file__):
    d = utils.load_all()
    num_shells = 10
    num_filled = 2
    interaction = kwargs["interaction"]
    label = kwargs["label"]
    ml = utils.label_num_filled_to_ml(label, num_filled)
    d = d[(d["method"] != "imsrg[f]+eom[n]") &
          (d["method"] != "magnus_quads+eom") &
          (d["interaction"] == interaction) &
          (d["label"] == label) &
          (d["num_shells"] == num_shells) &
          (d["num_filled"] == num_filled) &
          # filter out higher frequencies because they stretch the plot too much
          (d["freq"] <= 1.0) &
          (d["ml"] == ml)]
Example #43
            s2, r, done, info = env.step(a)
            r = -1 * (0.5 - s2[0])
            f = features.get_features(s2)
            #s = s2
            reward += r
            if done:
                break
        total_reward.append(reward)
    return sum(total_reward)/len(total_reward)

env = gym.make('MountainCar-v0')
num_action = env.action_space.n
num_base_func = 12
nb = num_base_func*num_action+1
sigma = 0.5 #2.5
num_iterations = 10
gamma = 0.999
ep = 0.2        #0.2
num_sampling = 100
features = RBF(env,sigma,num_base_func)

if __name__ == '__main__':
    w,error_list,reward_list = LSPI(plot=True)
    try:
        test(w,render=True)
    finally:
        print(w)
        utils.plot(error_list, reward_list)
Example #44
aveW = np.zeros((1, 6))
show = False

for i in range(0, runs):
    print("Running test # " + str(i))

    x = np.insert(np.random.random((d, 2)) * 2 - 1, 0, np.ones((1, d)), axis=1)
    x = np.append(x, x[:, 1:2] * x[:, 2:3], axis=1)
    x = np.append(x, np.square(x[:, 1:3]), axis=1)
    truth = np.sign(np.square(x[:, 1]) + np.square(x[:, 2]) - .6)
    # flip the sign of 10% of the labels to simulate noise
    # (array sizes must be integers, not the floats d*.9 produces)
    noise = np.append(np.ones(int(d * .9)), np.ones(d - int(d * .9)) * -1, axis=0)
    np.random.shuffle(noise)
    truth *= noise
    nwrong, o, w = lr.runLR(x, truth, show=show)

    plotX = np.linspace(-1, 1, 1000)
    plotY = np.sqrt(np.square(plotX) * -1 + .6)  # NaN outside the circle; those points are not drawn
    plotX = np.append(plotX, plotX)   # 1-D arrays: concatenate, no axis=1
    plotY = np.append(plotY, -plotY)
    green = x[(truth == 1), 1:]
    red = x[(truth < 1), 1:]
    plot(green, red, [7, 1, 7], axis=311, show=show, other=[plotX, plotY, 'b-'])
    pause(.1)

    wrongIn += nwrong
    aveW = aveW + w
print("Average of " + str(wrongIn / runs) + " wrong in sample per run")
fractionWrong = (wrongIn / runs) / d
print("%f incorrect on average in sample" % fractionWrong)
print("ave w is " + str(aveW / runs))
Example #45
    def get_reliability(self):

        # finding sources
        self.log.info(" Extracting the sources on both sides ")

        pfile = self.prefix + self.catalogue_format + ".fits"
        nfile = self.prefix + "_negative" + self.catalogue_format + ".fits"
        # i need to catch mmap.mmap error here

        # running a source finder
        self.source_finder(image=self.negimage,
                           output=nfile, thresh=self.neg_smooth,
                           savemask=self.savemaskneg,
                           prefix=self.prefix, **self.opts_neg)

        self.source_finder(image=self.imagename,
                           output=pfile, thresh=self.pos_smooth, 
                           savemask=self.savemaskpos,
                           prefix=self.prefix, **self.opts_pos)

        self.log.info(" Source Finder completed successfully ")


         
        pmodel, positive, labels = self.params(pfile)
        nmodel, negative, labels = self.params(nfile)
     
        # setting up a kernel, Gaussian kernel
        bandwidth = []

        for plane in negative.T:
            bandwidth.append(plane.std())

        nplanes = len(labels)
        cov = numpy.zeros([nplanes, nplanes])
        nnsrc = len(negative)
        npsrc = len(positive)
        
        self.log.info(" There are %d positive and %d negtive detections "%(npsrc, nnsrc))
 
        if nnsrc == 0 or npsrc ==0:
            self.log.error("The resulting array has length of 0 thus cannot compute"
                    " the reliability. Aborting.")

        self.log.info(" Computing the reliabilities ")
        for i in range(nplanes):
            for j in range(nplanes):
                if i == j:
                    cov[i, j] = bandwidth[i]*((4.0/((nplanes+2)*
                                  nnsrc))**(1.0/(nplanes+4.0)))
        self.log.info("The resulting covariance matrix is %r"%cov)

        pcov = utils.gaussian_kde_set_covariance(positive.T, cov)
        ncov = utils.gaussian_kde_set_covariance(negative.T, cov)

        # get number densities
        nps = pcov(positive.T) * npsrc
        nns = ncov(positive.T) * nnsrc

        # define reliability of positive catalog
        rel = (nps-nns)/nps
        for src, rf in zip(pmodel.sources, rel):
            src.setAttribute("rel", rf)
        self.log.info(" Saved the reliabilities values.")

        # remove sources with poor correlation and high reliability,
        # the values are currently arbitrary
        if self.do_psf_corr and self.derel:
            for s in pmodel.sources:
                cf, r = s.correlation_factor, s.rel
                if cf < 0.006 and r > 0.60:
                    s.rel = 0.0    

        if self.makeplots:
            savefig = self.prefix + "_planes.png"
            utils.plot(positive, negative, rel=rel, labels=labels,
                        savefig=savefig, prefix=self.prefix)

        # removes sources in a given radius from the phase center
        if self.radiusrm:
            self.log.info(" Remove sources ra, dec, radius of  %r" 
                          " from the phase center" %self.radiusrm)
            pmodel = self.remove_sources_within(pmodel)


        if not self.savefits:
            self.log.info(" Deleting the negative image.")
            os.system("rm -r %s"%self.negimage)

        # Set field Center
        pmodel.ra0, pmodel.dec0 = map(numpy.deg2rad, self.wcs.getCentreWCSCoords())

        return pmodel, nmodel, self.locstep
Example #46
# Copyright (c) 2015 Jaakko Luttinen

from utils import plot

import matplotlib.pyplot as plt

if __name__ == "__main__":

    plt.figure()
    plot("pca", 1, maxiter=200)
    plt.xlim(0, 6)
    plt.ylim(-7500, -4500)
    plt.savefig("fig_pca_01.pdf", frameon=False)
    
    plt.figure()
    plot("pca", 2, maxiter=200)
    plt.xlim(0, 70)
    plt.ylim(-120000, -70000)
    plt.savefig("fig_pca_02.pdf", frameon=False)
    
    plt.figure()
    plot("mog", 1, maxiter=200)
    plt.xlim(0, 3)
    plt.ylim(-1000, -750)
    plt.savefig("fig_mog_01.pdf", frameon=False)

    plt.figure()
    plot("mog", 2, maxiter=200)
    plt.xlim(0, 150)
    plt.ylim(-40000, -32000)
    plt.savefig("fig_mog_02.pdf", frameon=False)
Example #47
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 09 00:57:33 2013

@author: Tejay Cardon
visualize pla
"""

from utils import points2weights, plot
from LinearRegression import runLR
import numpy as np
from matplotlib.pyplot import pause
    
runs = 10
d = 1000

for i in range(0, runs):
    print("Running test # " + str(i))
    
    f = points2weights(np.random.random((2,2)) * 2 - 1)
    x = np.insert(np.random.random((d,2)) * 2 - 1,0,np.ones((1,d)), axis=1)
    truth = np.sign(np.dot(x,f))
    i,o,w = runLR(x, truth, show=True)
    
    green = x[(truth == 1), 1:]
    red = x[(truth != 1), 1:]
    plot(green, red, f, axis=311, show=True)
    pause(.5)
Example #48
            loads_d = []
            for i in loads:
                loads_d.append(get_num_dict(i))

            chord_loads.append(get_50_percent(loads_d[0]))
            vserver_loads.append(get_50_percent(loads_d[1]))

            x_values.append(next(get_numbers(f)))

    plt.figure().set_size_inches(6.5,5)
    plt.xlabel("#Nodes")
    plt.ylabel("% of nodes storing 50% of data")

    from matplotlib.ticker import EngFormatter
    formatter = EngFormatter(places=0)
    plt.gca().xaxis.set_major_formatter(formatter)

    plt.ylim(0,0.5)
    plt.xlim(0,1000000)

    out_file = "intro_lb_chord.pdf"

    d1 = prepare(x_values,chord_loads)
    d2 = prepare(x_values,vserver_loads)

    d1['label'] = 'Neighbor Replication'
    d1['linestyle'] = 'dashed'
    d2['label'] = "Virtual Servers"

    plot(out_file,d1,d2)