Example #1
def problem4(data1,data2,data3):
    print('2.4')
    prior1 = 0.8
    prior2 = 0.1
    prior3 = 0.1
    mu1 = utils.get_mean(data1)
    mu2 = utils.get_mean(data2)
    mu3 = utils.get_mean(data3)
    cov1 = utils.get_cov(data1)
    cov2 = utils.get_cov(data2)
    cov3 = utils.get_cov(data3)
    disc_func1 = generate_gaussian_distribution_discriminant_func(mu1,cov1,prior1)
    disc_func2 = generate_gaussian_distribution_discriminant_func(mu2,cov2,prior2)
    disc_func3 = generate_gaussian_distribution_discriminant_func(mu3,cov3,prior3)
    funcs = [disc_func1, disc_func2, disc_func3]
    def whichone(functions, x):
        res = []
        for func in functions:
            res.append(func(x))
        return res.index(max(res))
    test = np.array([
                [1,2,1],
                [5,3,2],
                [0,0,0],
                [1,0,0]
                ])
    for i in range(len(test)):
        m1 = utils.mahalanobis_distance(test[i],mu1,cov1)
        m2 = utils.mahalanobis_distance(test[i],mu2,cov2)
        m3 = utils.mahalanobis_distance(test[i],mu3,cov3)
        index = whichone(funcs, test[i])
        print('test {}:\n class {}'.format(i, index))
        print(' mahalanobis distance:\t {}, {}, {}'.format(m1, m2, m3))
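The snippet relies on project-specific helpers that are not shown here; a minimal sketch of what `utils.get_mean`, `utils.get_cov`, and `utils.mahalanobis_distance` plausibly compute (an assumption based only on how they are called) is:

import numpy as np

def get_mean(data):
    # column-wise mean of an (N, D) sample matrix
    return np.mean(np.asarray(data, dtype=float), axis=0)

def get_cov(data):
    # (D, D) covariance matrix, treating rows as observations
    return np.cov(np.asarray(data, dtype=float), rowvar=False)

def mahalanobis_distance(x, mu, cov):
    # sqrt((x - mu)^T cov^-1 (x - mu))
    diff = np.asarray(x, dtype=float) - mu
    return float(np.sqrt(diff @ np.linalg.solve(cov, diff)))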
Example #2
    def evaluate(self, key):
        args = self.param.args
        dm = self.param.volatile.dm

        dm.restart(key, args.batch_size, shuffle=False)

        result_arr = []
        while True:
            incoming = self.get_next_batch(dm, key, restart=False)
            if incoming is None:
                break
            incoming.args = Storage()

            with torch.no_grad():
                self.net.forward(incoming)
            result_arr.append(incoming.result)

        detail_arr = Storage()
        for i in args.show_sample:
            index = [i * args.batch_size + j for j in range(args.batch_size)]
            incoming = self.get_select_batch(dm, key, index)
            incoming.args = Storage()
            with torch.no_grad():
                self.net.detail_forward(incoming)
            detail_arr["show_str%d" % i] = incoming.result.show_str

        detail_arr.update(
            {key: get_mean(result_arr, key)
             for key in result_arr[0]})
        detail_arr.perplexity_avg_on_batch = np.exp(detail_arr.word_loss)
        return detail_arr
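`get_mean` here aggregates one metric over the per-batch `result` objects collected in `result_arr`; a minimal sketch under that assumption (the real helper is not shown) is:

import numpy as np

def get_mean(result_arr, key):
    # average a scalar metric over a list of per-batch result objects
    return float(np.mean([result[key] for result in result_arr]))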
Example #3
    def evaluate(self, key):
        args = self.param.args
        dm = self.param.volatile.dm

        dm.restart(key, args.batch_size, shuffle=False)

        result_arr = []
        while True:
            incoming = self.get_next_batch(dm, key, restart=False)
            if incoming is None:
                break
            incoming.args = Storage()

            with torch.no_grad():
                self.net.forward(incoming)
            result_arr.append(incoming.result)

        detail_arr = Storage()
        for i in args.show_sample:
            index = [i * args.batch_size + j for j in range(args.batch_size)]
            incoming = self.get_select_batch(dm, key, index)
            incoming.args = Storage()
            with torch.no_grad():
                self.net.forward(incoming)
            detail_arr["show_str%d" % i] = incoming.result.show_str

        detail_arr.update({
            'loss': get_mean(result_arr, 'loss'),
            'accuracy': get_accuracy(result_arr, label_key='label',
                                     prediction_key='prediction')
        })
        return detail_arr
Example #4
 def reconstruct(self, data):
     self.eval()
     with torch.no_grad():
         _, px_zs, _ = self.forward(data)
         # cross-modal matrix of reconstructions
         recons = [[get_mean(px_z) for px_z in r] for r in px_zs]
     return recons
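In this snippet each `px_z` is a torch distribution, so `get_mean` presumably just extracts its mean as the point reconstruction; a minimal sketch under that assumption:

def get_mean(dist):
    # assumed behaviour: use the distribution's mean as the reconstruction
    return dist.mean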
Example #5
def test_basic_mean(field, measures):
    """
    Scenario:
    1. Create a station
    2. Put valid measures to this station
    3. Request mean computation

    * Acceptance criteria:
    - Valid means for all values available
    """
    nb_measures = 5
    count = 0
    ret = add_station('station_mean')
    assert ret.status_code == 200

    for _ in range(nb_measures):
        ret = put_measures('station_mean', measures)
        if field in measures.keys():
            count += 1
        assert ret.status_code == 200

    _, results = get_mean('station_mean', field)

    assert results.get('mean') == measures.get(field, 0.0)
    assert results.get('nb_values') == count
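The test helpers (`add_station`, `put_measures`, `get_mean`) are not shown; a hedged sketch of a `get_mean` client that returns `(status_code, body)`, against a hypothetical endpoint layout and base URL, could look like:

import requests

BASE_URL = "http://localhost:5000"  # hypothetical test server

def get_mean(station, field):
    # hypothetical route; returns the HTTP status code and the parsed JSON body
    resp = requests.get("{}/stations/{}/mean/{}".format(BASE_URL, station, field))
    try:
        body = resp.json()
    except ValueError:
        body = {}
    return resp.status_code, body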
Example #6
 def get_pressure(self):
     out = self.get_values()
     out = [replace_comma(item) for item in out]
     out = [split_slash(item) for item in out]
     out = [get_mean(item) for item in out]
     # convert to metres
     out = [(item * 9.8 * 10000 + 101350) / (850 * 9.8) for item in out]
     return np.array(out)
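The arithmetic on the final line appears to convert a gauge-pressure reading p (interpreted as kgf/cm², hence the factor 9.8 * 10000 Pa) plus an atmospheric offset of 101350 Pa into the height of a liquid column of density 850 kg/m3, i.e. h = (p * 9.8e4 + 101350) / (850 * 9.8). This reading of the constants is an assumption; the original comment only says "convert to m".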
Example #7
 def reconstruct(self, data):
     self.eval()
     with torch.no_grad():
         qz_x = self.qz_x(*self.enc(data))
         latents = qz_x.rsample()  # no dim expansion
         px_z = self.px_z(*self.dec(latents))
         recon = get_mean(px_z)
     return recon
Example #8
 def init_cloud(self, N, dispersion_factor=6):
     self.N = N
     self.cloud = [
         init_layers(self.architecture, i, dispersion_factor)
         for i in range(N)
     ]
     self.cloud_mean = get_mean(self.cloud)
     cloudf, _, _ = flatten_weights(self.cloud, self.N)
     self.cloud_var = get_var(cloudf)
Example #9
def test_unknown_station(field):
    """
    Scenario:
    1. Get mean from an unknown station

    * Acceptance criteria:
    - Server should reject the request
    """

    err, _ = get_mean("Unknown", field)
    assert err == 404, err
Example #10
def to_resoult(date, play_type, nums):

    data_dict = {
        "种类": play_type,                 # category (game type)
        "日期": date,                      # date
        "平均数": utils.get_mean(nums),    # mean
        "中位数": utils.get_median(nums),  # median
        "众数": utils.get_mode(nums)       # mode
    }
    data = pd.DataFrame(data_dict, index=[0])

    return data
Example #11
    def __init__(self, data, threshhold_energy):
        self.mean = utils.get_mean(data)  # (1, D)
        t = time.time()
        self.covariance_matrix = utils.get_covariance_matrix(data)  # (D, D)
        print(self.covariance_matrix.shape, " Cov matrix calculated in ",
              time.time() - t)
        t = time.time()
        self.eigen_values, self.eigen_vectors = \
            utils.do_eigenvalue_decomposition(self.covariance_matrix)
        print(self.eigen_vectors.shape, self.eigen_values.shape,
              "Eigen value calculated in ",
              time.time() - t)
        t = time.time()
        sorted_eigenvalues = []
        for i, val in enumerate(self.eigen_values):
            sorted_eigenvalues.append((val, i))
        sorted_eigenvalues.sort(reverse=True)  # (D, D)
        sorted_eigenvectors = self.eigen_vectors.copy()
        for i in range(sorted_eigenvectors.shape[1]):
            sorted_eigenvectors[:,
                                i] = self.eigen_vectors[:,
                                                        sorted_eigenvalues[i]
                                                        [1]]
        # print("after sorting")
        # print(sorted_eigenvalues)
        # print(sorted_eigenvectors)
        eigen_energy = []
        cum_sum = 0
        for i, val in enumerate(sorted_eigenvalues):
            cum_sum += val[0]
            eigen_energy.append(cum_sum)
        # normalise the cumulative eigen-energy so the last entry is 1.0
        eigen_energy = np.asarray(eigen_energy) / eigen_energy[-1]

        # print("Eigen Energy", eigen_energy)
        thresh = -1

        for i, val in enumerate(eigen_energy):
            if val >= threshhold_energy:
                thresh = i
                break
        if thresh == -1:
            thresh = len(eigen_energy) - 1

        outfile = TemporaryFile()
        self.projection_matrix = sorted_eigenvectors[:, :thresh + 1]  # (D, K)
        np.save(outfile, np.asarray(self.projection_matrix))
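A typical follow-up use of the resulting projection matrix (a sketch, not part of the original class) is to centre new data with `self.mean` and project it onto the retained eigenvectors:

import numpy as np

def project(data, mean, projection_matrix):
    # (N, D) data -> (N, K) scores in the retained principal subspace
    return (np.asarray(data) - mean) @ projection_matrix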
Example #12
def test_unknown_field(measures):
    """
    Scenario:
    1. Create a station
    2. Put measures with bad fields to this station
    3. Request mean computation

    * Acceptance criteria:
    - It should fail
    """
    nb_measures = 5
    ret = add_station('station_mean')
    assert ret.status_code == 200

    for _ in range(nb_measures):
        _ = put_measures('station_mean', measures)

    err, _ = get_mean('station_mean', 'Unknown')
    assert err == 400
Example #13
 def optimizer(func, x0, fprime, training_data, callback):
     n1 = ImageShape[0]
     n2 = ImageShape[1]
     diff = (winSize[0] - 1) // 2
     valid_windows = int(n1 * n2 - diff * 2 * (n1 + n2) + 4 * diff * diff)
     print('* Optimizer method. ')
     training_set_idx = 0
     n_samples = 1000
     w_t = x0
     m_t = 0
     x_image, t_image, mask_image = utils.get_images(
         ImageShape, PatternShape, winSize, TrainingSet[training_set_idx])
     x_mean = utils.get_mean(x_image, winSize, ImageShape[2], ImageShape)
     x_std = utils.get_std(x_image, winSize, ImageShape[2], ImageShape,
                           x_mean)
     train_data = sampleData(valid_windows, n_samples, x_image, t_image,
                             winSize, ImageShape, x_mean, x_std)
     e_t = func(w_t.astype('float32'), *train_data)
     e_it = numpy.zeros(num_epochs)
     de_it = numpy.zeros(num_epochs)
     auc_it = numpy.zeros(num_epochs)
     auc_x = numpy.zeros(num_epochs)
     m_r = 0.99
     it2 = 0
     for i in numpy.arange(num_epochs):
         dedw = fprime(w_t.astype('float32'), *train_data)
         g_t = -dedw
         l_r = learning_rate
         m_t = m_r * m_t + g_t * l_r
         dw_t = m_r * m_t + g_t * l_r
         w_t = w_t + dw_t
         e_t = func(w_t.astype('float32'), *train_data)
         e_it[i] = e_t
         if (i % 50 == 0):
             train_data = sampleData(valid_windows, n_samples, x_image,
                                     t_image, winSize, ImageShape, x_mean,
                                     x_std)
             print("i: {}, e_t: {}, time: {}".format(i, e_t, time.ctime()))
         de_it[i] = numpy.abs(dw_t).mean()
         if ((i > 10) and (i % 400 == 0)):
             training_set_idx = (training_set_idx + 1) % TrainingSet.size
             x_image, t_image, mask_image = utils.get_images(
                 ImageShape, PatternShape, winSize,
                 TrainingSet[training_set_idx])
             x_mean = utils.get_mean(x_image, winSize, ImageShape[2],
                                     ImageShape)
             x_std = utils.get_std(x_image, winSize, ImageShape[2],
                                   ImageShape, x_mean)
         if ((i > 10) and (i % 800 == 0)):
             numpy.save('../data/w_t.npy', w_t)
             sio.savemat(
                 '../data/BVS_data.mat', {
                     'depth': depth,
                     'width': width,
                     'drop_in': drop_in,
                     'drop_hid': drop_hid,
                     'w_t': w_t
                 })
             y_preds = utils.get_predictions(x_image, ImageShape,
                                             PatternShape, winSize,
                                             output_model, x_mean, x_std)
             t_data = utils.sliding_window(t_image,
                                           winSize,
                                           dim=1,
                                           output=1)
             auc_it[it2] = getAUC(w_t.astype('float32'), y_preds, t_data)
             print('AUC: {}'.format(auc_it[it2]))
             auc_x[it2] = i
             it2 += 1
             # debug images
             fig, ax = plt.subplots(nrows=1, ncols=1)
             ax.plot(numpy.arange(i), e_it[0:i], 'r-')
             fig.savefig('debug/error.png')
             plt.close(fig)
             fig, ax = plt.subplots(nrows=1, ncols=1)
             ax.plot(numpy.arange(i), de_it[0:i], 'g-')
             fig.savefig('debug/dw_t.png')
             plt.close(fig)
             fig, ax = plt.subplots(nrows=1, ncols=1)
             ax.plot(auc_x[0:it2], auc_it[0:it2], 'b-')
             fig.savefig('debug/auc.png')
             plt.close(fig)
             print('Show test image... ')
             output_image = utils.reconstruct_image(
                 y_preds, w=winSize, PatternShape=PatternShape, alpha=alpha)
             img = numpy.floor(output_image * 255)
             cv2.imwrite(
                 'debug/image-last-{}.png'.format(
                     TrainingSet[training_set_idx]), img)
Example #14
if __name__ == '__main__':
    opt = parse_opts()
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)

    torch.manual_seed(opt.manual_seed)

    model, parameters = generate_model(opt)
    print(model)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()

    if opt.no_mean_norm and not opt.std_norm:
        norm_method = transforms.Normalize([0, 0, 0], [1, 1, 1])
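`get_mean(opt.norm_value, dataset=...)` and `get_std(opt.norm_value)` presumably return per-channel RGB statistics on the 0-255 scale divided by `norm_value`; a hedged sketch with illustrative (assumed) constants:

def get_mean(norm_value=255, dataset='activitynet'):
    # per-channel means on the 0-255 scale, scaled down by norm_value (values assumed)
    means = {
        'activitynet': [114.8, 107.7, 99.5],
        'kinetics': [110.6, 103.2, 96.3],
    }
    return [m / norm_value for m in means[dataset]]

def get_std(norm_value=255):
    # per-channel standard deviations, likewise assumed
    return [s / norm_value for s in [38.8, 37.9, 40.0]]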
Example #15
def bm3d_1st_step_block_mean(sigma, img_noisy, nHard, kHard, NHard, pHard, lambdaHard3D, tauMatch, useSD, tau_2D,
                  block_mean=False):
    height, width = img_noisy.shape[0], img_noisy.shape[1]

    row_ind = ind_initialize(height - kHard + 1, nHard, pHard)
    column_ind = ind_initialize(width - kHard + 1, nHard, pHard)

    kaiserWindow = get_kaiserWindow(kHard)
    ri_rj_N__ni_nj, threshold_count = precompute_BM(img_noisy, kHW=kHard, NHW=NHard, nHW=nHard, tauMatch=tauMatch)
    group_len = int(np.sum(threshold_count))
    group_3D_table = np.zeros((group_len, kHard, kHard))
    weight_table = np.zeros((height, width))

    all_patches = image2patches(img_noisy, k=kHard, p=pHard)  # i_j_ipatch_jpatch__v
    if tau_2D == 'DCT':
        fre_all_patches = dct_2d_forward(all_patches)
    else:  # 'BIOR'
        fre_all_patches = bior_2d_forward(all_patches)
    fre_all_patches = fre_all_patches.reshape((height - kHard + 1, height - kHard + 1, kHard, kHard))

    acc_pointer = 0
    for i_r in row_ind:
        for j_r in column_ind:
            nSx_r = threshold_count[i_r, j_r]
            group_3D = build_3D_group(fre_all_patches, ri_rj_N__ni_nj[i_r, j_r], nSx_r)
            if block_mean:
                group_3D, weight = get_mean(group_3D)
            else:
                group_3D, weight = ht_filtering_hadamard(group_3D, sigma, lambdaHard3D, not useSD)
            group_3D = group_3D.transpose((2, 0, 1))
            group_3D_table[acc_pointer:acc_pointer + nSx_r] = group_3D
            acc_pointer += nSx_r

            if useSD:
                weight = sd_weighting(group_3D)

            weight_table[i_r, j_r] = weight

    if tau_2D == 'DCT':
        group_3D_table = dct_2d_reverse(group_3D_table)
    else:  # 'BIOR'
        group_3D_table = bior_2d_reverse(group_3D_table)

    numerator = np.zeros_like(img_noisy, dtype=np.float64)
    denominator = np.zeros_like(img_noisy, dtype=np.float64)
    acc_pointer = 0
    for i_r in row_ind:
        for j_r in column_ind:
            nSx_r = threshold_count[i_r, j_r]
            N_ni_nj = ri_rj_N__ni_nj[i_r, j_r]
            group_3D = group_3D_table[acc_pointer:acc_pointer + nSx_r]
            acc_pointer += nSx_r
            weight = weight_table[i_r, j_r]
            for n in range(nSx_r):
                ni, nj = N_ni_nj[n]
                patch = group_3D[n]

                numerator[ni:ni + kHard, nj:nj + kHard] += patch * kaiserWindow * weight
                denominator[ni:ni + kHard, nj:nj + kHard] += kaiserWindow * weight

    img_basic = numerator / denominator
    return img_basic
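In the `block_mean` branch, `get_mean` stands in for the hard-threshold filter and has to return a filtered 3D group plus a scalar weight. One plausible reading (an assumption, and the weight of 1.0 is only a placeholder) averages all patches in the matched group:

import numpy as np

def get_mean(group_3D):
    # replace every patch in the (k, k, n) stack with the stack's mean patch
    mean_patch = group_3D.mean(axis=2, keepdims=True)
    return np.broadcast_to(mean_patch, group_3D.shape).copy(), 1.0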
Example #16
def train(args):

    debug = args.debug
    gpu = args.gpu

    Nhidden = args.Nhidden  # LSTM hidden nodes

    Nbatches = args.Nbatches  # Training batches
    BatchSize = args.BatchSize  # Training batch size
    ChunkSize = args.ChunkSize  # The length for accumulating loss in training
    SubseqLen = args.SubseqLen  # Split the training sequence into subsequences
    LearningRate = args.LearningRate  # Learning rate
    Eps = args.Eps  # Eps used in Adam optimizer
    AMSGrad = args.AMSGrad  # Use AMSGrad in Adam
    LRdecrease = args.LRdecrease  # Decrease learning rate

    save_model_dir = args.save_model_dir
    save_model_name = args.save_model_name

    normal_data_dir = args.normal_data_dir
    normal_data_name_train = args.normal_data_name_train
    val_and_ref_name = args.normal_data_name_val_and_ref

    RED_collection_len = args.RED_collection_len
    RED_points = args.RED_points
    Pvalue_th = args.Pvalue_th

    if args.dummydata:
        training_normal_data, val_normal_data, ref_normal_data = (
            loaddata.load_normal_dummydata())
    else:
        _, training_normal_data, _ = (
            loaddata.load_data_split(
                data_dir=normal_data_dir,
                file_name=normal_data_name_train,
                # The first few readings could be unstable, remove it.
                split=(0.1, 0.8, 0.1)))

        _, ref_normal_data, val_normal_data = loaddata.load_data_split(
            data_dir=normal_data_dir,
            file_name=val_and_ref_name,
            # The first few readings could be unstable, remove it.
            split=(0.1, 0.45, 0.45))

    training_normal_data_mean = utils.get_mean(training_normal_data)
    training_normal_data_std = utils.get_std(training_normal_data)

    Nfeatures = training_normal_data.shape[1]
    AnomalyDetector = detector.Detector(input_size=Nfeatures,
                                        hidden_size=Nhidden,
                                        th=Pvalue_th)
    AnomalyDetector.set_mean(training_normal_data_mean)
    AnomalyDetector.set_std(training_normal_data_std)

    training_normal_data = AnomalyDetector.normalize(training_normal_data)
    val_normal_data = AnomalyDetector.normalize(val_normal_data)
    ref_normal_data = torch.tensor(AnomalyDetector.normalize(ref_normal_data))

    training_normal_wrapper = SeqGenerator.SeqGenerator(training_normal_data)
    val_normal_wrapper = SeqGenerator.SeqGenerator(val_normal_data)
    training_normal_len = len(training_normal_data)

    MSELossLayer = torch.nn.MSELoss()
    optimizer = optim.Adam(params=AnomalyDetector.parameters(),
                           lr=LearningRate,
                           eps=Eps,
                           amsgrad=AMSGrad)

    if gpu:
        ref_normal_data = ref_normal_data.cuda()
        MSELossLayer = MSELossLayer.cuda()
        AnomalyDetector = AnomalyDetector.cuda()

    if debug:
        for name, para in AnomalyDetector.named_parameters():
            print(name, para.size())

    for batch in range(Nbatches):

        def step_fn(data_batch, is_train=True):
            t = 0
            init_state = (torch.zeros(1, BatchSize, Nhidden),
                          torch.zeros(1, BatchSize, Nhidden))

            if gpu:
                init_state = (init_state[0].cuda(), init_state[1].cuda())
                data_batch = data_batch.cuda()

            state = init_state
            loss_list = []
            while t + ChunkSize + 1 < data_batch.shape[0]:
                if is_train:
                    AnomalyDetector.zero_grad()

                pred, state = AnomalyDetector.forward(
                    data_batch[t:t + ChunkSize, :, :], state)
                truth = data_batch[t + 1:t + ChunkSize + 1, :, :]

                loss = MSELossLayer(pred, truth)

                if debug:
                    print("pred.size ", pred.size(), "truth.size ",
                          truth.size())

                if is_train:
                    loss.backward()
                    optimizer.step()

                if gpu:
                    loss_list.append(loss.detach().cpu().numpy())
                else:
                    loss_list.append(loss.detach().numpy())

                state = (state[0].detach(), state[1].detach())
                t += ChunkSize
            return loss_list

        training_batch = torch.tensor(
            training_normal_wrapper.next(BatchSize, SubseqLen))
        train_loss_list = step_fn(training_batch, is_train=True)
        val_batch = torch.tensor(val_normal_wrapper.next(BatchSize, 2000))
        val_loss_list = step_fn(val_batch, is_train=False)
        print("Batch", batch, "Training loss", np.mean(train_loss_list),
              "Val loss", np.mean(val_loss_list))

        if (batch + 1) % LRdecrease == 0:
            LearningRate = LearningRate / 2.0
            utils.setLearningRate(optimizer, LearningRate)

    print("Training Done")
    print("Getting RED")

    AnomalyDetector.set_RED_config(RED_collection_len=RED_collection_len,
                                   RED_points=RED_points)
    AnomalyDetector.collect_ref_RED(ref_normal_data, gpu)

    if not os.path.exists(save_model_dir):
        os.makedirs(save_model_dir)
    torch.save(AnomalyDetector.cpu(), save_model_dir + save_model_name)
    AnomalyDetector.jitSaveTorchModule(save_model_dir)
    print("Model saved")
Example #17
    plot_time_series(fcff, 'FCFF', img_save=os.path.join(image_dir, 'fcff'))
    plot_time_series(eps, 'EPS', img_save=os.path.join(image_dir, 'eps'))
    plot_time_series(working_capital_diff + capital_expenditure,
                     'W + C',
                     img_save=os.path.join(image_dir, 'working_cap_exp'))
    plot_time_series(depreciation,
                     'Depreciation',
                     img_save=os.path.join(image_dir, 'depreciation'))

    plot_time_series([nopat, depreciation],
                     'Depreciation compares to Nopat',
                     img_save=os.path.join(image_dir, 'nopat_2_depreciation'))
    plot_time_series(
        [(depreciation / operating_income_before_depreciation),
         get_ones(depreciation),
         get_mean(depreciation / operating_income_before_depreciation)],
        'Ratio of depreciation to income before depreciation',
        unit='%',
        img_save=os.path.join(image_dir,
                              'depreciation_2_income_before_depreciation'))
    plot_time_series([(capital_expenditure / nopat).clip(0, 10),
                      get_ones(depreciation)],
                     'Ratio of capital expenditure to nopat',
                     unit='%',
                     img_save=os.path.join(image_dir,
                                           'nopat_2_capital_exp_ratio'))
    plot_time_series(
        depreciation / operating_income_before_depreciation,
        'Ratio of depreciation to operating income before depreciation',
        unit='%',
        img_save=os.path.join(image_dir, 'depreciation_to_income'))
Example #18
def get_training_set(dataset):
    assert dataset in ['ucf101', 'hmdb51']
    if dataset == 'hmdb51':
        with open('/home/jingjing/zhipeng/adv-attack-video/code2/datasets/c3d_dataset/hmdb51_params.pkl', 'rb') as ipt:
            opt = pickle.load(ipt)
        opt = DictToAttr(opt)
    elif dataset == 'ucf101':
        with open('/home/jingjing/zhipeng/adv-attack-video/code2/datasets/c3d_dataset/ucf101_params.pkl', 'rb') as ipt:
            opt = pickle.load(ipt)
        opt = DictToAttr(opt)
    
    opt.dataset = dataset
    
    # transforms begin
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    
    opt.mean = get_mean(opt.norm_value, dataset=opt.dataset)
    opt.std = get_std(opt.norm_value, dataset=opt.dataset)
    

    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    torch.manual_seed(opt.manual_seed)    
    assert opt.train_crop in ['random', 'corner', 'center']
    if opt.train_crop == 'random':
        crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'center':
        crop_method = MultiScaleCornerCrop(
            opt.scales, opt.sample_size, crop_positions=['c'])
    spatial_transform = spatial_Compose([
        crop_method,
        RandomHorizontalFlip(),
        ToTensor(opt.norm_value), norm_method
        ])
    temporal_transform = TemporalRandomCrop(opt.sample_duration)
    target_transform = ClassLabel()
    # transforms end

    if opt.dataset == 'ucf101':
        try:
            training_data = UCF101(
                opt.video_path,
                opt.annotation_path,
                'training',
                input_style='rgb',
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
            )
        except:
            training_data = UCF101(
                '/home/jingjing/zhipeng/adv-attack-video/data/UCF101-jpg',
                '/home/jingjing/zhipeng/adv-attack-video/data/UCF101-annotation/ucfTrainTestlist/ucf101_01.json',
                'training',
                input_style='rgb',
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
            )
    elif opt.dataset == 'hmdb51':
        try:
            training_data = HMDB51(
                opt.video_path,
                opt.annotation_path,
                'training',
                input_style='rgb',
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
            )
        except:
            training_data = HMDB51(
                '/home/jingjing/zhipeng/adv-attack-video/data/hmdb51-jpg',
                '/home/jingjing/zhipeng/adv-attack-video/data/hmdb51-annotation/hmdb51_1.json',
                'training',
                input_style = 'rgb',
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
                )
    return training_data
Example #19
def get_test_set(dataset):
    assert dataset in ['ucf101', 'hmdb51']
    if dataset == 'hmdb51':
        with open('/home/jingjing/zhipeng/adv-attack-video/code2/datasets/c3d_dataset/hmdb51_params.pkl', 'rb') as ipt:
            opt = pickle.load(ipt)
        opt = DictToAttr(opt)
    elif dataset == 'ucf101':
        with open('/home/jingjing/zhipeng/adv-attack-video/code2/datasets/c3d_dataset/ucf101_params.pkl', 'rb') as ipt:
            opt = pickle.load(ipt)
        opt = DictToAttr(opt)
        
    opt.dataset = dataset
    
    # transform begin
    opt.mean = get_mean(opt.norm_value, dataset)
    opt.std = get_std(opt.norm_value, dataset)
    
    torch.manual_seed(opt.manual_seed) 
    
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)
    
    spatial_transform = spatial_Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
    temporal_transform = LoopPadding(opt.sample_duration)
    target_transform = target_Compose([VideoID(),ClassLabel()])
    # transform end
    
    if opt.dataset == 'ucf101':
        try:
            test_data = UCF101(
                opt.video_path,
                opt.annotation_path,
                'validation',
                input_style='rgb',
                n_samples_for_each_video=3,
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
                sample_duration=opt.sample_duration)
        except:
            test_data = UCF101(
                '/home/jingjing/zhipeng/adv-attack-video/data/UCF101-jpg',
                '/home/jingjing/zhipeng/adv-attack-video/data/UCF101-annotation/ucfTrainTestlist/ucf101_01.json',
                'validation',
                input_style='rgb',
                n_samples_for_each_video=3,
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
                sample_duration=opt.sample_duration)
    elif opt.dataset == 'hmdb51':
        try:
            test_data = HMDB51(
                opt.video_path,
                opt.annotation_path,
                'validation',
                input_style = 'rgb',
                n_samples_for_each_video = 3,
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
                sample_duration=opt.sample_duration)
        except:
            test_data = HMDB51(
                '/home/jingjing/zhipeng/adv-attack-video/data/hmdb51-jpg',
                '/home/jingjing/zhipeng/adv-attack-video/data/hmdb51-annotation/hmdb51_1.json',
                'validation',
                input_style = 'rgb',
                n_samples_for_each_video = 3,
                spatial_transform=spatial_transform,
                temporal_transform=temporal_transform,
                target_transform=target_transform,
                sample_duration=opt.sample_duration)
    return test_data
Example #20
def bm3d_2nd_step_block_mean(sigma,
                             img_noisy,
                             img_basic,
                             nWien,
                             kWien,
                             NWien,
                             pWien,
                             tauMatch,
                             useSD,
                             tau_2D,
                             block_mean=False):
    height, width = img_noisy.shape[0], img_noisy.shape[1]

    row_ind = ind_initialize(height - kWien + 1, nWien, pWien)
    column_ind = ind_initialize(width - kWien + 1, nWien, pWien)

    kaiserWindow = get_kaiserWindow(kWien)
    ri_rj_N__ni_nj, threshold_count = precompute_BM(img_basic,
                                                    kHW=kWien,
                                                    NHW=NWien,
                                                    nHW=nWien,
                                                    tauMatch=tauMatch)
    group_len = int(np.sum(threshold_count))
    group_3D_table = np.zeros((group_len, kWien, kWien))
    weight_table = np.zeros((height, width))

    noisy_patches = image2patches(img_noisy, k=kWien,
                                  p=pWien)  # i_j_ipatch_jpatch__v
    basic_patches = image2patches(img_basic, k=kWien,
                                  p=pWien)  # i_j_ipatch_jpatch__v
    if tau_2D == 'DCT':
        fre_noisy_patches = dct_2d_forward(noisy_patches)
        fre_basic_patches = dct_2d_forward(basic_patches)
    else:  # 'BIOR'
        fre_noisy_patches = bior_2d_forward(noisy_patches)
        fre_basic_patches = bior_2d_forward(basic_patches)

    fre_noisy_patches = fre_noisy_patches.reshape(
        (height - kWien + 1, height - kWien + 1, kWien, kWien))
    fre_basic_patches = fre_basic_patches.reshape(
        (height - kWien + 1, height - kWien + 1, kWien, kWien))

    acc_pointer = 0
    for i_r in row_ind:
        for j_r in column_ind:
            nSx_r = threshold_count[i_r, j_r]
            group_3D_img = build_3D_group(fre_noisy_patches,
                                          ri_rj_N__ni_nj[i_r, j_r], nSx_r)
            group_3D_est = build_3D_group(fre_basic_patches,
                                          ri_rj_N__ni_nj[i_r, j_r], nSx_r)
            if block_mean:
                group_3D, weight = get_mean(group_3D_img)
            else:
                group_3D, weight = wiener_filtering_hadamard(
                    group_3D_img, group_3D_est, sigma, not useSD)
            group_3D = group_3D.transpose((2, 0, 1))

            group_3D_table[acc_pointer:acc_pointer + nSx_r] = group_3D
            acc_pointer += nSx_r

            if useSD:
                weight = sd_weighting(group_3D)

            weight_table[i_r, j_r] = weight

    if tau_2D == 'DCT':
        group_3D_table = dct_2d_reverse(group_3D_table)
    else:  # 'BIOR'
        group_3D_table = bior_2d_reverse(group_3D_table)

    # for i in range(1000):
    #     patch = group_3D_table[i]
    #     print(i, '----------------------------')
    #     print(patch)
    #     cv2.imshow('', patch.astype(np.uint8))
    #     cv2.waitKey()

    numerator = np.zeros_like(img_noisy, dtype=np.float64)
    denominator = np.zeros_like(img_noisy, dtype=np.float64)
    acc_pointer = 0
    for i_r in row_ind:
        for j_r in column_ind:
            nSx_r = threshold_count[i_r, j_r]
            N_ni_nj = ri_rj_N__ni_nj[i_r, j_r]
            group_3D = group_3D_table[acc_pointer:acc_pointer + nSx_r]
            acc_pointer += nSx_r
            weight = weight_table[i_r, j_r]
            for n in range(nSx_r):
                ni, nj = N_ni_nj[n]
                patch = group_3D[n]

                numerator[ni:ni + kWien,
                          nj:nj + kWien] += patch * kaiserWindow * weight
                denominator[ni:ni + kWien,
                            nj:nj + kWien] += kaiserWindow * weight

    img_denoised = numerator / denominator
    return img_denoised
Example #21
def problem2(data1,data2,data3):
    print('2.2(a):')
    x1, x2 = [], []
    for i in range(10):
        x1.append(data1[i][0])
        x2.append(data2[i][0])
    x1 = np.array(x1)
    x2 = np.array(x2)
    mu1 = np.mean(x1)
    mu2 = np.mean(x2)
    cov1 = np.cov(x1)
    cov2 = np.cov(x2)
    mu1 = np.array([mu1])
    mu2 = np.array([mu2])
    cov1 = np.array([cov1])
    cov2 = np.array([cov2])
    print(type(mu1))
    disc_func1 = generate_gaussian_distribution_discriminant_func(mu1,cov1,0.5)
    disc_func2 = generate_gaussian_distribution_discriminant_func(mu2,cov2,0.5)
    s11, s12, s21, s22 = 0, 0, 0, 0
    for i in range(10):
        if disc_func1(x1[i]) > disc_func2(x1[i]):
            s11+=1
        else:
            s12+=1
        if disc_func1(x2[i]) > disc_func2(x2[i]):
            s22+=1
        else:
            s21+=1
    print('problem 2.2(b):\n error ratio:\t{}\n accuracy:\t{}'.format(
        (s12 + s21) / (s11 + s12 + s21 + s22),
        (s11 + s22) / (s11 + s12 + s21 + s22)))
    print('bhattacharyya bound:\t{}'.format(
        error_bound.bhattacharyya_bound(0.5, 0.5, mu1, mu2, cov1, cov2)))
    print('======================================================')
    print('2.2(e):')
    mu1 = utils.get_mean(data1)
    mu2 = utils.get_mean(data2)
    mu3 = utils.get_mean(data3)
    cov1 = utils.get_cov(data1)
    cov2 = utils.get_cov(data2)
    cov3 = utils.get_cov(data3)
    print(type(mu1))
    disc_func1 = generate_gaussian_distribution_discriminant_func(mu1,cov1,0.3)
    disc_func2 = generate_gaussian_distribution_discriminant_func(mu2,cov2,0.4)
    disc_func3 = generate_gaussian_distribution_discriminant_func(mu3,cov3,0.3)
    clf = np.zeros((3,3))
    funcs = [disc_func1, disc_func2, disc_func3]
    def whichone(functions, x):
        res = []
        for func in functions:
            res.append(func(x))
        return res.index(max(res))

    for i in range(10):
        index1 = whichone(funcs, data1[i])
        clf[0][index1]+=1
        index2 = whichone(funcs, data2[i])
        clf[1][index2]+=1
        index3 = whichone(funcs, data3[i])
        print(index3)
        clf[2][index3]+=1
    accuracy = (clf[0][0]+clf[1][1]+clf[2][2])/np.sum(clf)
    print('problem 2.2(e):\n error ratio:\t{}\n accuracy:\t{}'.format(
        1 - accuracy, accuracy))
    print('bhattacharyya bound:\t{}'.format(
        error_bound.bhattacharyya_bound(0.3, 0.4, mu1, mu2, cov1, cov2)))
Example #22
def train_nn(X, Y, cloud, nn_architecture, method, max_epochs, n_batches,
             batch_size, learning_rate, cost_type, N, kernel_a, alpha_init,
             alpha_rate, beta, gamma, verbose, var_epsilon):

    # initiation of lists storing the cost history
    cost_history = []
    cost_history_mean = []

    alpha = alpha_init
    elapsed_epochs = 0
    print("\nTraining started...")

    # performing calculations for subsequent iterations
    for i in range(max_epochs):

        for batch in range(n_batches):

            start = batch * batch_size
            end = start + batch_size

            Y_hat = []
            costs = []
            cache = []
            grads = []

            for j in range(N):

                # step forward
                Y_hat_temp, cache_temp = full_forward_propagation(
                    X[:, start:end], cloud[j], nn_architecture)
                Y_hat.append(Y_hat_temp)
                cache.append(cache_temp)

                # calculating cost and saving it to history
                costj = get_cost_value(Y_hat[j], Y[:, start:end], cost_type)
                costs.append(costj)
                # step backward - calculating gradient
                if method in ["gradient_descent", "swarm"]:
                    gradsj = full_backward_propagation(Y_hat[j], Y[:,
                                                                   start:end],
                                                       cache[j], cloud[j],
                                                       nn_architecture)
                    grads.append(gradsj)

            if method == "swarm":
                cloud, cloud_var = update_nn_weights(cloud, grads,
                                                     learning_rate, N,
                                                     kernel_a, alpha, beta,
                                                     gamma)
            elif method == "swarm_derivfree":
                cloud, cloud_var = update_nn_weights_derivative_free(
                    cloud, costs, learning_rate, N, kernel_a, alpha, beta,
                    gamma)
            elif method == "gradient_descent":
                cloud, cloud_var = update_gd(cloud[0], grads[0],
                                             nn_architecture, learning_rate)
            else:
                raise Exception("No method found")

            #end of iteration
            cost_history.append(costs)

            #mean particle position and its cost
            cloud_mean = get_mean(cloud)
            Y_hat_mean, _ = full_forward_propagation(X[:,
                                                       start:end], cloud_mean,
                                                     nn_architecture)
            cost_mean = get_cost_value(Y_hat_mean, Y[:, start:end], cost_type)
            cost_history_mean.append(cost_mean)

        #end of epoch----------------
        cloud_var = np.mean(
            cloud_var)  #mean of variances along dimensions of parameter space

        if (verbose):
            print(
                "Iteration: {:05} - Cloud mean cost: {:.5f} - Cloud variance: {:.5f}"
                .format(i, cost_mean, cloud_var))

        alpha += alpha_rate
        elapsed_epochs += 1

        if cloud_var < var_epsilon:
            print("Convergence achieved - particles are localized")
            break

    if i == (max_epochs - 1):
        print("Maximum number of epochs reached")

    print("\nFunction value at cloud mean: " + str(cost_mean))
    print("Cost function evaluated {:01} times".format(
        int(n_batches * elapsed_epochs * N)))

    return cloud, cloud_mean, cloud_var, cost_history, cost_history_mean
Example #23
 def set_cloud(self, cloud):
     self.cloud = cloud
     self.cloud_mean = get_mean(self.cloud)
     self.cloud_var = get_var(self.cloud)
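For the particle-cloud code, `get_mean` averages corresponding parameters across all particles; a sketch assuming each cloud entry is a dict of per-layer arrays:

import numpy as np

def get_mean(cloud):
    # elementwise average of each named parameter over the N particles
    return {k: np.mean([particle[k] for particle in cloud], axis=0)
            for k in cloud[0]}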
Example #24
def main():
    print('* Start! ')
    print('* Loading config.json')
    with open('config.json') as json_data:
        config = json.load(json_data)
        depth = int(config["layers"])
        width = int(config["neurons_by_layer"])
        drop_in = float(config["dropout_input"])
        drop_hid = float(config["dropout_hidden"])
        num_epochs = int(config["num_epochs"])    
        winSide = int(config["window_side"])    
        ImageShape = config["image_shape"]
        ImageShape = (int(ImageShape[0]),int(ImageShape[1]),int(ImageShape[2]))
        ValidationSet = utils.getValues(config["validation_set"])
        alpha = float(config["alpha"])
    # Other global variables
    PatternShape = (ImageShape[0], ImageShape[1])
    winSize = (winSide, winSide)
    n_features = ImageShape[2] * (winSide ** 2)
    print("* Building model and compiling functions...")
    input_var = T.matrix('inputs')
    target_var = T.ivector('targets')
    network = utils.build_custom_mlp(n_features, input_var, depth, width, drop_in, drop_hid)
    prediction = lasagne.layers.get_output(network)
    t2 = theano.tensor.extra_ops.to_one_hot(target_var, 2, dtype='int32')
    error = lasagne.objectives.categorical_crossentropy(prediction, t2)
    params = lasagne.layers.get_all_params(network, trainable=True)
    output_model = theano.function([input_var], prediction)
    # compilation
    comp_params_updater = []
    for w in params:
        w_in = T.matrix()
        if(w_in.type != w.type):
            w_in = T.vector()
        w_update = theano.function([w_in], updates=[(w, w_in)])
        comp_params_updater = comp_params_updater + [w_update]
    
    def params_updater(all_w):
        '''
        Receives a new flat weight vector and writes it back into the
        network parameters, slice by slice.
        '''
        idx_init = 0
        params_idx = 0
        for w_updater in comp_params_updater:
            w = params[params_idx]
            params_idx += 1
            w_value_pre = w.get_value()
            w_act = all_w[idx_init:idx_init+w_value_pre.size]
            w_value = w_act.reshape(w_value_pre.shape)
            idx_init += w_value_pre.size
            w_updater(w_value)
        return
    
    w_t = numpy.load('../data/w_t.npy')
    params_updater(w_t)
    print('* Show test images... ')
    test_n = ValidationSet
    test_idx    = numpy.arange(test_n.size)
    accuracy = numpy.zeros(test_n.size,)
    for idx in test_idx:
        print('* Test image: {}'.format(idx))
        x_image,  t_image,  mask_image = utils.get_images(ImageShape, PatternShape, winSize, test_n[idx])
        print('* get_mean. ')
        x_mean = utils.get_mean(x_image,  winSize,  ImageShape[2],  ImageShape)
        print('* get_std. ')
        x_std = utils.get_std(x_image,  winSize,  ImageShape[2],  ImageShape,  x_mean)
        print('* get_predictions. ')
        y_preds = utils.get_predictions(x_image, ImageShape, PatternShape, winSize, output_model,  x_mean, x_std)
        output_image = utils.reconstruct_image(y_preds,w=winSize, PatternShape=PatternShape, alpha=alpha)
        t_image = t_image.astype(numpy.float_)/255
        mask_image = mask_image.astype(numpy.float_)/255
        error_image,  accuracy[idx] = utils.get_error_image(output_image, t_image, mask_image)
        print('Accuracy[{}]: {}'.format(test_n[idx], accuracy[idx]))
        error_image = numpy.floor(error_image*255)
        cv2.imwrite('debug/error_image-'+str(test_n[idx])+'.png',error_image)
        # Output of model
        output_image = numpy.floor(output_image*255)
        cv2.imwrite('debug/y_preds-'+str(test_n[idx])+'.png',output_image)