Example #1
import numpy as np

import utilities


def next_stump(dataset, stumps):
    # Initialize helper variables.
    y = dataset.iloc[:, 4].values
    stump = {}
    rim = []

    # ln 1: Special case: use the average of all y(i) values for each x(i).
    #       The feature used as the root is chosen by the maximum change
    #       in variance.
    if len(stumps) == 0:
        # Overwrite the stump value with the average. Copy so the caller's
        # dataframe is not mutated.
        init_subset = dataset.copy()
        init_subset['PR'] = np.average(y)
        stump = stump_from_dataset(init_subset)

    # Otherwise build on the previous stumps using gradient boosting.
    else:
        # ln 2(a): Find the residuals r(i)(m) for each x(i) in the data.
        for index, data in dataset.iterrows():
            rim.append(data['PE'] - utilities.evaluate(stumps, data))

        # ln 2(b): Fit a regression tree to the targets r.
        rim_subset = dataset.copy()
        rim_subset['PR'] = rim
        stump = stump_from_dataset(rim_subset)

    stumps.append(stump)
    print(stump)
    print(utilities.mse(dataset, stumps))

    return stumps
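
The snippet above leans on a helper, stump_from_dataset, and on utilities.evaluate, neither of which is shown in this excerpt. As a rough sketch only, with the stump fields attribute/value/left/right inferred from Example #11 (the 'gradient' field used there for the stopping check, and any shrinkage factor, are omitted), the pair might look like:

import numpy as np


def stump_from_dataset(df, target='PR'):
    """Fit a one-split regression stump to df[target] (illustrative sketch)."""
    best = None
    for attribute in df.columns.drop(target):
        for value in df[attribute].unique():
            mask = df[attribute] < value
            if mask.all() or (~mask).all():
                continue
            left = df.loc[mask, target].mean()
            right = df.loc[~mask, target].mean()
            # Sum of squared errors of the two leaf predictions.
            sse = (((df.loc[mask, target] - left) ** 2).sum() +
                   ((df.loc[~mask, target] - right) ** 2).sum())
            if best is None or sse < best['sse']:
                best = {'attribute': attribute, 'value': value,
                        'left': left, 'right': right, 'sse': sse}
    return best


def evaluate(stumps, row):
    """Sum the predictions of all stumps for one row (illustrative sketch)."""
    return sum(s['left'] if row[s['attribute']] < s['value'] else s['right']
               for s in stumps)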
Example #2
def test_mse_along_random_vector():
    logger.debug("test_mse_along_random_vector")
    v1 = numpy.random.rand(100)
    v2 = numpy.random.rand(100)
    mse_utilities = utilities.mse(v1, v2)
    mse_sklearn = mean_squared_error(v1, v2)
    logger.debug("MSE(utilities) = %12.8f, MSE(sklearn) = %12.8f",
                 mse_utilities, mse_sklearn)
    # Compare with a tolerance: the two implementations can differ by
    # floating-point rounding even when both are correct.
    assert numpy.isclose(mse_utilities, mse_sklearn)
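
utilities.mse itself is not part of these excerpts. A minimal sketch consistent with the vector calls in the tests, including the elementwise=True variant used in the PyTorch example further down (the dataset/stumps overload seen in Examples #1 and #11 is a separate code path and is not sketched here):

import numpy as np


def mse(y_true, y_pred, elementwise=False):
    """Mean squared error; with elementwise=True, one MSE per sample."""
    diff = (np.asarray(y_true) - np.asarray(y_pred)) ** 2
    if elementwise:
        # Average over everything except the leading (sample) axis.
        return diff.reshape(diff.shape[0], -1).mean(axis=-1)
    return diff.mean()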
Example #3
    def score(self, test_x, y_true):
        """
        :param test_x: test features
        :param y_true: ground-truth targets
        :return: the evaluation metrics MSE and R2
        """
        y_pred = self.predict(test_x)

        return mse(y_true, y_pred), R2(y_true, y_pred)
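
R2 is not defined in this excerpt. The coefficient of determination is conventionally one minus the ratio of residual to total sum of squares; a plausible implementation under that convention:

import numpy as np


def R2(y_true, y_pred):
    """Coefficient of determination: 1 - SS_res / SS_tot."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)
    return 1.0 - ss_res / ss_tot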
Example #4
    def testCompareSegallViscoelasticSolutionWithSimpleAnalytic(self):
        slip = 1.0
        D = 15.
        H = 15.
        x = np.arange(-100.01, 100.01)
        t_over_t_r = np.arange(0.0, 5.0)
        # Wrap map in list() so the result can be indexed (map is lazy in
        # Python 3).
        u = list(map(lambda t: constant_slip_maxwell.
                     surface_solution(x, t, 1.0, D, H, slip),
                     t_over_t_r))

        time0analyticsolution = (slip / np.pi) * np.arctan(D / x)
        self.assertTrue(utilities.mse(u[0], time0analyticsolution) < 0.0001)
        return u, x, H
Example #5
    def testCompareVariableSlip(self):
        slip = lambda z: 1
        x = np.arange(0.05, 10.0, 0.5)
        t = np.arange(0.0, 5.0)
        alpha = 1

        # list(...) so the solutions can be indexed below (map is lazy in
        # Python 3).
        u_t = list(map(lambda t_in: full_viscoelastic.solution(slip, x, t_in, alpha), t))

        # First compare to the elastic solution -- they should match at t=0.
        u_e = full_elastic.surface_elastic_half_space(slip, x)
        # print(utilities.mse(u_t[0], u_e))
        self.assertTrue(utilities.mse(u_t[0], u_e) < 0.0001)

        # Then compare to the viscoelastic solutions -- they should match at
        # all times because the slip is constant.
        u_constant_slip_ve = list(map(
            lambda t_in: constant_slip_maxwell_dimensionless.solution(x, t_in, alpha),
            t))
        # pyp.figure(1)
        # utilities.plot_time_series_1D(x, u_t, t, show=False)
        # pyp.figure(2)
        # utilities.plot_time_series_1D(x, u_constant_slip_ve, t, show=False)
        # pyp.show()
        for i in range(len(t)):
            # print(u_t[i] - u_constant_slip_ve[i])
            self.assertTrue(utilities.mse(u_t[i], u_constant_slip_ve[i]) < 0.0001)
Example #6
    def testCompareViscoelasticSolutions(self):
        alpha = 1.0

        u_benchmark, x2, H = \
            self.testCompareSegallViscoelasticSolutionWithSimpleAnalytic()
        x = x2 / H
        t = np.arange(0.0, 5.0)
        # list(...) so the solutions can be indexed below.
        u_estimate = list(map(lambda t_in:
                              constant_slip_maxwell_dimensionless.
                              solution(x, t_in, alpha), t))

        for i in range(len(t)):
            self.assertTrue(utilities.mse(u_estimate[i], u_benchmark[i])
                            < 0.01)
Example #7
def main(args):

    # Arguments & parameters
    workspace = args.workspace
    task = args.task
    repeats_num = args.repeats_num
    cuda = args.cuda and torch.cuda.is_available()
    filename = args.filename

    batch_size = 200

    if cuda:
        print('Using GPU')
    else:
        print('Using CPU')

    # Paths
    checkpoint = torch.load(
        os.path.join(workspace, 'main', 'checkpoints', '10000_iterations.pth'),
        map_location='cuda' if cuda else 'cpu')

    figures_dir = os.path.join(workspace, filename, task,
                               'repeats_{}'.format(repeats_num), 'figures')
    create_folder(figures_dir)

    random_state = np.random.RandomState(1111)

    # Load data
    dataset = load_mnist_data()
    data = dataset['test_x']

    # Add noise
    if task == 'denoise':
        std = np.std(data)
        x = add_gaussian_noise(data, std, random_state)
        loss_func = pytorch_mse

    elif task == 'impaint':
        x = add_bars(data, random_state)
        loss_func = pytorch_mse

    elif task == 'complete':
        x = cut_half_image(data)
        loss_func = pytorch_half_mse

    else:
        raise ValueError('Unknown task: {}'.format(task))

    # Source signal
    s = data

    # Load model
    netG = _netG()
    netG.load_state_dict(checkpoint['netG'])

    if cuda:
        netG.cuda()

    psnr_list = []

    for n in range(len(x) // batch_size):

        print('====== mini batch {} ======'.format(n))

        batch_x = x[n * batch_size:(n + 1) * batch_size]
        batch_s = s[n * batch_size:(n + 1) * batch_size]

        # Optimizer for source and filter
        optimizer_on_input = OptimizerOnInput(netG,
                                              batch_x,
                                              batch_s,
                                              loss_func=loss_func,
                                              learning_rate=0.01,
                                              figures_dir=figures_dir)

        # Stage 1: Try several initializations and select the best one.
        (batch_z_hat, batch_alpha_hat, batch_s_hat, batch_x_hat) = \
            optimizer_on_input.optimize_first_stage(repeats_num,
                                                    max_iteration=201)

        # Stage 2: Use the initialization obtained from stage 1 and optimize
        # the source and filter.
        (batch_z_hat, batch_alpha_hat, batch_s_hat, batch_x_hat) = \
            optimizer_on_input.optimize_second_stage(batch_z_hat,
                                                     batch_alpha_hat,
                                                     max_iteration=5001)

        # Calculate psnr
        elementwise_mse_loss = mse(batch_s_hat, batch_s, elementwise=True)
        elementwise_psnr_loss = mse_to_psnr(elementwise_mse_loss, max=1.)
        psnr_loss = np.mean(elementwise_psnr_loss)

        psnr_list.append(psnr_loss)
        print('*** mean psnr over {} mini batches: {}'.format(
            n + 1, np.mean(psnr_list)))
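
mse_to_psnr is not shown in this excerpt. Under the usual convention PSNR = 10 * log10(MAX^2 / MSE), a sketch compatible with the max=1. call above would be:

import numpy as np


def mse_to_psnr(mse, max=1.):
    """PSNR in dB from (possibly element-wise) MSE, for signals in [0, max]."""
    # The small floor guards against log10(inf) when the MSE is zero.
    return 10. * np.log10(max ** 2 / np.maximum(mse, 1e-12))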
Example #8
    def optimize(self, x, s, z_hat, alpha_hat, max_iteration, figures_dir):
        '''Optimize over the seed and filter.

        Input:
          x: mixture to fit
          s: ground-truth source, used only for monitoring MSE/PSNR
          z_hat: initial estimated seed, (samples_num, seed_num)
          alpha_hat: initial estimated filters, (samples_num, filter_len, filter_len)
          max_iteration: int
          figures_dir: string

        Returns:
          z_hat: estimated seed, (samples_num, seed_num)
          alpha_hat: estimated filters, (samples_num, filter_len, filter_len)
          s_hat: estimated source, (samples_num, 1, 28, 28)
          x_hat: estimated mixture
        '''

        samples_num = x.shape[0]

        # Estimated seed
        z_hat = Variable(torch.Tensor(z_hat).cuda(), requires_grad=True)

        # Estimated filter
        alpha_hat = Variable(torch.Tensor(alpha_hat).cuda(),
                             requires_grad=True)

        # Mixture
        x = torch.Tensor(x).cuda()

        # Optimizer
        optimizer = optim.Adam([z_hat, alpha_hat],
                               lr=self.learning_rate,
                               betas=(0.9, 0.999))

        iteration = 0

        while iteration < max_iteration:

            self.netG.eval()

            # Estimated source
            s_hat = self.netG(z_hat)

            # Estimated x
            x_hat = alpha_hat[:, None, None, None] * s_hat

            # Evaluate and plot
            if iteration % 200 == 0:
                # Calculate MSE & PSNR
                np_x = x.data.cpu().numpy()
                np_s_hat = s_hat.data.cpu().numpy()

                elementwise_mse_loss = mse(np_s_hat, s, elementwise=True)
                elementwise_psnr_loss = mse_to_psnr(elementwise_mse_loss,
                                                    max=1.)

                print('iteration: {}, mse_loss: {:4f}, psnr: {:4f}'.format(
                    iteration, np.mean(elementwise_mse_loss),
                    np.mean(elementwise_psnr_loss)))

                # Plot
                figure_path = '{}/{}_iterations.png'.format(
                    figures_dir, iteration)
                plot_image(np_x, s, np_s_hat, figure_path)
                print('Saved png to {}'.format(figure_path))

            # Loss for backpropagation
            loss = self.loss_func(x_hat.view(samples_num, -1), x)

            if self.regularize_z:
                loss += 1e-3 * (z_hat**2).mean(-1)

            # Element-wise backpropagation
            loss.backward(torch.ones(samples_num).cuda())

            optimizer.step()
            optimizer.zero_grad()

            iteration += 1

        z_hat = z_hat.data.cpu().numpy()
        alpha_hat = alpha_hat.data.cpu().numpy()
        s_hat = s_hat.data.cpu().numpy()
        x_hat = x_hat.data.cpu().numpy()

        return z_hat, alpha_hat, s_hat, x_hat
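
The loss.backward(torch.ones(samples_num)) call above backpropagates a vector-valued loss: passing a vector of ones makes each sample's loss contribute its own gradient, equivalent to summing the per-sample losses first. A minimal standalone illustration of the same pattern:

import torch

# Three independent scalar parameters, one per "sample".
p = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
loss = p ** 2                    # per-sample losses, shape (3,)

# Vector-valued backward: each sample's gradient is accumulated
# independently; equivalent to loss.sum().backward().
loss.backward(torch.ones(3))
print(p.grad)                    # tensor([2., 4., 6.])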
Example #9
    def testCompareElasticSolutions(self):
        x = np.arange(0.05, 5.0, 0.05)
        displacement = full_elastic.surface_elastic_half_space(lambda z: 1.0, x)
        displacement2 = full_elastic.surface_two_layer_elastic(lambda z: 1.0,
                                                               1.0, 0.0, x)
        self.assertTrue(utilities.mse(displacement, displacement2) < 0.0001)
Example #10
    def testmse(self):
        # Each squared difference is 1, so the mean is exactly 1.
        a = utilities.mse([1, 2, 3], [0, 1, 2])
        self.assertEqual(a, 1)
Example #11
import math

import utilities

# Load dataset.
dataset = utilities.import_CCPP(False)
length = len(dataset)
ratio = int(length * 0.75)
train_dataset = dataset.iloc[0:ratio]
test_dataset = dataset.iloc[ratio:]

# Initialize the lists of stumps and losses.
stumps = []
losses = []
m = 150

for i in range(1, m + 1):
    print("Step %d of %d\n" % (i, m))
    stumps = next_stump(train_dataset, stumps)
    losses.append(utilities.mse(train_dataset, stumps))
    # Append the new stump and its loss to a file.
    with open("trees_%d.txt" % m, "a+") as f:
        f.write("%s:%f:%f:%f:%f\n" %
                (stumps[-1]['attribute'], stumps[-1]['value'],
                 stumps[-1]['left'], stumps[-1]['right'], losses[-1]))

    # Check stopping condition(s).
    if stumps[-1]['gradient'] == math.inf:
        m = i
        break

utilities.plot_loss(m, losses, train_dataset,
                    'Mean Squared Error; lambda = 0.50', 'cornflowerblue')
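
utilities.plot_loss is project-specific and not shown. A hypothetical matplotlib version with the same call shape (step count, loss history, dataset, title, color) might look like:

import matplotlib.pyplot as plt


def plot_loss(m, losses, train_dataset, title, color):
    """Plot the training-set MSE after each boosting step (sketch)."""
    # train_dataset is unused in this sketch; kept for signature parity.
    plt.plot(range(1, m + 1), losses[:m], color=color)
    plt.xlabel('Boosting step')
    plt.ylabel('Training MSE')
    plt.title(title)
    plt.show()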