def compute_predictions(self):

        n_days = self.model.datareader.n_test_batches

        start_days = 0
        if self.args.debug:
            start_days = 120
            n_days = start_days+self.batched_days

        mean_mse = 0.
        mean_distance = 0.
        mean_abs_per_step = np.zeros(INPUT_LENGTH)
        if self.args.log_batch:
            mean_log_batch = np.zeros((self.log_batch_size,3))
        n_batches = 0

        for i in range(start_days, n_days,self.batched_days):

            n_batches += 1

            bound = min(self.batched_days, n_days - i)

            x_cat, y_cat, n_per_day = self.get_x_y(i, bound)

            fixed_c_attack = attacks.FixedCAttack(self.model,
                                        self.args,
                                        self.mean_step_return,
                                        self.c,
                                        self.batch_size,
                                        self.log_batch_size)

            if self.args.log_batch:
                perturbation, norm, distance, logs = fixed_c_attack.attack(x_cat, n_per_day)
            else:
                perturbation, norm, distance = fixed_c_attack.attack(x_cat, n_per_day)

            # Average perturbation per time step
            for mode in ["buy","sell"]:
                mean_abs_per_step += np.mean(np.mean(np.abs(
                    utils.convert_from_tensor(perturbation[mode])),axis=1),axis=1)
                mean_mse += np.mean(np.sqrt(norm[mode]))
                mean_distance += np.mean(np.sqrt(distance[mode]))

                if self.args.log_batch:
                    mean_log_batch += logs[mode]

        mean_mse /= 2.*n_batches
        mean_distance /= 2*n_batches
        if self.args.log_batch:
            mean_log_batch /= 2*n_batches

        mean_abs_per_step = list(utils.convert_from_tensor(mean_abs_per_step))

        entry = [mean_mse,mean_distance]+mean_abs_per_step
        if self.args.log_batch:
            return entry,mean_log_batch
        else:
            return entry
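
utils.convert_from_tensor is used throughout these snippets but never shown. A minimal stand-in, written here as an assumption rather than the project's real helper, would return a NumPy array whether it is handed a torch tensor or something that is already NumPy:

import numpy as np
import torch


def convert_from_tensor(value):
    """Hypothetical stand-in for utils.convert_from_tensor: return a NumPy
    array for both torch tensors and array-like inputs."""
    if torch.is_tensor(value):
        return value.detach().cpu().numpy()
    return np.asarray(value)


# Works for both tensors and arrays, so a call like
# list(utils.convert_from_tensor(mean_abs_per_step)) above does not fail
# when the accumulator is already a NumPy array.
print(convert_from_tensor(torch.ones(2, 3)).shape)   # (2, 3)
print(convert_from_tensor(np.zeros(4)).shape)        # (4,)
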
    def compute_metrics(self, mode, output, sem, mean_output):

        h = scipy.stats.t.ppf(self.args.confidence, self.args.samples)

        if mode == "buy":
            bound = output - h * sem
            test = bound >= mean_output

        elif mode == "sell":
            bound = output + h * sem
            test = bound <= mean_output

        number = np.count_nonzero(utils.convert_from_tensor(test))

        return number
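
compute_metrics counts how many inputs still cross the unperturbed mean prediction once a one-sided t-confidence bound is taken into account. A self-contained sketch of the same test, with made-up numbers:

import numpy as np
import scipy.stats

confidence = 0.95
samples = 100                            # number of Monte Carlo samples
h = scipy.stats.t.ppf(confidence, samples)

output = np.array([1.2, 0.9, 1.5])       # perturbed predictions per input
sem = np.array([0.05, 0.02, 0.10])       # standard error of the mean
mean_output = 1.0                        # unperturbed mean prediction

# "buy" succeeds when even the lower confidence bound stays above the mean
lower_bound = output - h * sem
n_buy = np.count_nonzero(lower_bound >= mean_output)

# "sell" succeeds when even the upper confidence bound stays below the mean
upper_bound = output + h * sem
n_sell = np.count_nonzero(upper_bound <= mean_output)

print(n_buy, n_sell)
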
    def compute_mean(self, folder):

        year = int(folder.split("_")[-1])

        datar = data_reader.DataReader(year, torch.device('cpu'))

        n_days = datar.n_test_batches

        values = []

        for i in range(n_days):
            x, y = datar.get_test_batch(i, cumulative=True)

            if self.args.conditional:
                y_step = y[:, self.args.step_prediction - 1]
            else:
                y_step = y[:, self.args.steps - 1]

            values.append(utils.convert_from_tensor(y_step))

        values = np.array(values)
        mean = values.mean()
        return mean
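
compute_mean just averages the final-step cumulative labels over every test day; the only subtlety is that the year is parsed from the trailing token of the folder name (the "<prefix>_<year>" convention is assumed here):

import numpy as np

folder = "saved_models_2016"             # assumed "<prefix>_<year>" naming
year = int(folder.split("_")[-1])        # -> 2016

# Final-step labels collected over the test days, illustrative values only
values = [[0.010, -0.020], [0.030, 0.000]]
mean = np.array(values).mean()
print(year, mean)                        # 2016 0.005
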
    def attack_batch(self, data, id_batch, v_batch, labels, hidden, cell, estimator):

            batch_size = data.shape[1]

            with torch.no_grad():
                _, original_mu, original_sigma = attack_utils.forward_model(
                    self.model, data, id_batch, v_batch, hidden, cell, self.params)

            shape = (self.max_pert_len,) + data.shape[:2]

            best_perturbation = {"double": np.zeros(shape),
                                 "zero": np.zeros(shape)}

            c_shape = (self.max_pert_len, data.shape[1])
            best_c = {"double": np.zeros(c_shape),
                      "zero": np.zeros(c_shape)}
            best_distance = {"double": np.full(c_shape, np.inf),
                             "zero": np.full(c_shape, np.inf)}

            out_shape = (self.max_pert_len,)+ original_mu.shape
            perturbed_output_mu = {"double": np.zeros(out_shape),
                                   "zero": np.zeros(out_shape)}
            perturbed_output_sigma = {"double": np.zeros(out_shape),
                                      "zero": np.zeros(out_shape)}

            modes = ["zero","double"]
            targets = {}

            lines = []

            for mode in modes:

                print("mode",mode)

                # Loop on values of c to find successful attack with minimum perturbation

                for i in range(0, len(self.params.c), self.params.batch_c):

                    bound = min(self.params.batch_c,len(self.params.c)-i)
                    print("c",self.params.c[i:i+bound])

                    batched_data = data.repeat(1,bound,1)

                    batched_id_batch = id_batch.repeat(1,bound)
                    batched_v_batch = v_batch.repeat(bound,1)
                    batched_labels = labels.repeat(bound)
                    batched_hidden = hidden.repeat(1,bound,1)
                    batched_cell = cell.repeat(1,bound,1)

                    batched_c = torch.cat([self.params.c[i + j] *
                                           torch.ones(batch_size, device=self.params.device)
                                           for j in range(bound)], dim=0)

                    # Build a fresh attack module for this batch of c values
                    attack_module = AttackModule(self.model,
                                                 self.params,
                                                 batched_c,
                                                 batched_data,
                                                 batched_id_batch,
                                                 batched_v_batch,
                                                 batched_hidden,
                                                 batched_cell)

                    batched_target = attack_module.generate_target(batched_labels,mode)

                    optimizer = optim.Adam([attack_module.perturbation], lr=self.params.learning_rate)

                    # Iterate steps
                    for k in range(self.params.n_iterations):

                        if estimator == "ours":
                            self.attack_step_ours(attack_module, optimizer, k, batched_target)
                        elif estimator == "naive":
                            self.attack_step_naive(attack_module, optimizer, k, batched_target)
                        else:
                            raise Exception("No such estimator")

                    # Evaluate the attack
                    # Run full number of samples on perturbed input to obtain perturbed output
                    with torch.no_grad():
                        _,batched_perturbed_output,_ = attack_module()

                        # Unbatch c to run everything from this
                        for j in range(bound):

                            c = self.params.c[i+j]

                            left = batch_size*j
                            right = batch_size*(j+1)

                            target = batched_target[left:right]
                            targets[mode] = target

                            perturbed_output = batched_perturbed_output[left:right]
                            v_batch = batched_v_batch[left:right]

                            loss = attack_utils.AttackLoss(self.params,c,v_batch)

                            norm_per_sample, distance_per_sample, loss_per_sample, norm, distance, loss = \
                                loss(attack_module.perturbation[:,left:right],
                                                          perturbed_output,
                                                          target)

                            # For each tolerance level, keep the smallest-distance
                            # perturbation whose norm stays within the tolerance
                            numpy_norm = np.sqrt(utils.convert_from_tensor(norm_per_sample))
                            numpy_distance = utils.convert_from_tensor(distance_per_sample)
                            numpy_perturbation = utils.convert_from_tensor(
                                attack_module.perturbation.data[:,left:right])

                            #print("numpy perturbation",attack_module.perturbation.data[:,0])

                            #self.print(i, norm, distance, loss, norm_per_sample.shape[0])

                            for l in range(self.max_pert_len):

                                indexes_best_c = np.logical_and(numpy_norm <= self.params.tolerance[l],
                                                                numpy_distance < best_distance[mode][l])

                                best_perturbation[mode][l][:, indexes_best_c] = \
                                    numpy_perturbation[:, indexes_best_c]
                                best_distance[mode][l, indexes_best_c] = \
                                    numpy_distance[indexes_best_c]
                                best_c[mode][l, indexes_best_c] = c

                            # Save norm and distance for c plot
                            mean_numpy_norm = np.mean(numpy_norm)
                            mean_distance = np.mean(np.sqrt(numpy_distance))

                            lines.append([estimator,mode,c,mean_numpy_norm,mean_distance])

                with torch.no_grad():

                    # Rebuild the attack module on the original batch to evaluate the best perturbations
                    attack_module = AttackModule(self.model,
                                                 self.params,
                                                 0,
                                                 data,
                                                 id_batch,
                                                 v_batch,
                                                 hidden,
                                                 cell)

                    for l in range(self.max_pert_len):

                        attack_module.perturbation.data = \
                            torch.tensor(best_perturbation[mode][l],
                                         device=self.params.device).float()
                        _,aux1,aux2 = attack_module()

                        perturbed_output_mu[mode][l] = aux1.cpu().numpy()
                        perturbed_output_sigma[mode][l] = aux2.cpu().numpy()


            return original_mu,original_sigma,best_c, best_perturbation, \
                   best_distance, perturbed_output_mu, perturbed_output_sigma,\
                   targets,lines
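
attack_batch tiles the batch along its batch dimension, one copy per value of c, so several penalty weights are optimised in a single pass. A stripped-down sketch of that tiling, with made-up shapes rather than the original data loader's:

import torch

batch_size, seq_len, n_features = 4, 10, 3
data = torch.randn(seq_len, batch_size, n_features)   # (T, B, F)
c_values = torch.tensor([0.1, 1.0, 10.0])
bound = len(c_values)

# One copy of the batch per value of c, stacked along the batch dimension
batched_data = data.repeat(1, bound, 1)               # (T, B * bound, F)

# Per-sample penalty weight aligned with the tiled batch
batched_c = torch.cat([c * torch.ones(batch_size) for c in c_values], dim=0)

print(batched_data.shape, batched_c.shape)   # torch.Size([10, 12, 3]) torch.Size([12])
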
    def prediction_wrapper(self, batch_size, predictions, binary_predictions,
                           n_bins, binning, input_, n_steps):

        if binning:

            mean_list = [0] * len(self.log_samples)
            binary_mean_list = [0] * len(self.log_samples)

            mean = np.zeros((batch_size, len(predictions["eval"]), n_bins + 1))
            binary_mean = np.zeros(
                (batch_size, len(binary_predictions["eval"]), 3))

        else:
            mean_list = [0] * len(self.log_samples)
            mean = torch.zeros(batch_size, n_steps, 1).to(self.device)

        if binning:
            mean_time_sample = 0.
            mean_time_binning = 0.
        for i in range(self.n_samples):

            start_time = time.time()

            sample, target = self.forward_prediction_sample(*input_, n_steps)

            if binning:

                mid_time = time.time()
                mean_time_sample += mid_time - start_time

                res = self.compute_predictions(sample, predictions, n_bins)
                mean += res

                binary_res = self.compute_predictions(sample,
                                                      binary_predictions, 2)
                binary_mean += binary_res

                if i + 1 in self.log_samples:
                    index = self.log_samples.index(i + 1)
                    mean_list[index] = np.copy(mean)
                    binary_mean_list[index] = np.copy(binary_mean)
                    mean_list[index] /= (i + 1)
                    binary_mean_list[index] /= (i + 1)

                end_time = time.time()
                mean_time_binning += end_time - mid_time

            else:

                mean += sample

                if i + 1 in self.log_samples:

                    index = self.log_samples.index(i + 1)
                    mean_list[index] = np.copy(utils.convert_from_tensor(mean))
                    mean_list[index] /= (i + 1)

        if binning:

            return mean_list, binary_mean_list

        else:
            return mean_list
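
prediction_wrapper keeps a single running sum of the Monte Carlo samples and snapshots the normalised mean whenever the sample count reaches an entry of self.log_samples. The bookkeeping in isolation, with made-up numbers:

import numpy as np

n_samples = 8
log_samples = [2, 4, 8]            # checkpoints at which to record the running mean
mean = np.zeros(3)
mean_list = [0] * len(log_samples)

rng = np.random.default_rng(0)
for i in range(n_samples):
    sample = rng.normal(size=3)
    mean += sample
    if i + 1 in log_samples:
        index = log_samples.index(i + 1)
        mean_list[index] = np.copy(mean) / (i + 1)   # mean over the first i+1 samples

print([m.round(3) for m in mean_list])
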
    def compute_predictions(self):

            n_days = self.model.datareader.n_test_batches

            start_days = 0
            if self.args.debug:
                n_days = start_days + self.args.days
            n_batches = 0

            number_buy = np.zeros(self.max_pert_len)
            number_sell = np.zeros(self.max_pert_len)
            total_number = 0.

            if self.args.target == "regression":
                mean_perturbed = np.zeros((self.k_len,self.max_pert_len))

            for i in range(start_days, n_days, self.args.days):

                n_batches += 1
                bound = min(self.args.days, n_days - i)

                x_cat,y_cat,n_per_day = self.get_x_y(i,bound)

                if self.args.target == "binary":
                    best_c,best_perturbation,best_distance,percentage = \
                        self.attack.attack(x_cat,n_per_day)

                    for l in range(self.max_pert_len):
                        for mode in ["buy","sell"]:
                            aux = np.sqrt((best_perturbation[mode][l]**2).sum(0).reshape(-1))

                            test_indexes = np.logical_or(aux < 0, aux > self.args.max_pert[l])
                            if np.any(test_indexes):
                                print("Max perturbation",self.args.max_pert[l])
                                print(aux[test_indexes])

                            assert(np.all(np.logical_and(0 <= aux,aux <= self.args.max_pert[l])))

                    number_buy += np.array(percentage["buy"])
                    number_sell += np.array(percentage["sell"])

                    total_number += x_cat.shape[1]
                else:

                    perturbed_output = self.attack.attack(x_cat,n_per_day,ground_truth=y_cat)
                    for l in range(self.max_pert_len):

                        for j in range(bound):

                            left = n_per_day * j
                            right = n_per_day * (j + 1)
                            perturbed_output_day_j = perturbed_output[l][left:right]

                            y = y_cat[:, self.args.steps - 1].view(-1).cpu().numpy()
                            y_day_j = y[left:right]

                            perturbed_output_day_j = utils.convert_from_tensor(perturbed_output_day_j)

                            for m in range(self.k_len):
                                mean_perturbed[m,l] += utils.get_returns(perturbed_output_day_j,
                                                                         y_day_j,
                                                                         self.args.k[m])

            if self.args.target == "binary":
                per_buy = number_buy / total_number
                per_sell = number_sell / total_number

                return {"buy":list(per_buy),"sell":list(per_sell)}
            else:

                mean_perturbed /= float(n_days - start_days)
                return list(mean_perturbed)
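
The binary branch above re-checks that every stored perturbation respects its budget by taking the per-sample L2 norm over time and comparing it with max_pert[l]. The same check on a toy array:

import numpy as np

max_pert = 0.5
# best_perturbation[mode][l] has shape (time, samples); illustrative values only
perturbation = np.array([[0.1, 0.3],
                         [0.2, 0.3]])

# Per-sample L2 norm over the time dimension
aux = np.sqrt((perturbation ** 2).sum(0).reshape(-1))

assert np.all(np.logical_and(0 <= aux, aux <= max_pert)), aux
print(aux)
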
    def attack(self, input_, n_per_day, ground_truth=None):

        if self.target_type == "binary":
            best_perturbation = {
                "buy": np.zeros((self.max_pert_len, ) + input_.shape),
                "sell": np.zeros((self.max_pert_len, ) + input_.shape)
            }

            c_shape = (self.max_pert_len, input_.shape[1])
            best_c = {"buy": np.zeros(c_shape), "sell": np.zeros(c_shape)}
            best_distance = {
                "buy": np.full(c_shape, np.inf),
                "sell": np.full(c_shape, np.inf)
            }
            percentage = {}
        else:
            best_perturbation = {
                "regression": np.zeros((self.max_pert_len, ) + input_.shape)
            }

            c_shape = (self.max_pert_len, input_.shape[1])
            best_c = {"regression": np.zeros(c_shape)}
            best_distance = {"regression": np.full(c_shape, np.inf)}
            perturbed_outputs = []

        if self.target_type == "binary":
            modes = ["buy", "sell"]
        else:
            modes = ["regression"]

        for mode in modes:
            # Loop on values of c to find successful attack with minimum perturbation

            for i in range(len(self.args.c)):

                c = self.args.c[i]

                # Create attack module with parameters
                if self.args.conditional:
                    attack_module = ConditionalAttackModule(
                        self.model, self.args, c, input_)
                else:
                    attack_module = AttackModule(self.model, self.args, c,
                                                 input_)

                target, mean_output = attack_module.generate_target(
                    input_.shape[1],
                    self.mean,
                    mode,
                    n_per_day,
                    ground_truth=ground_truth,
                    steps=self.args.steps,
                    estimator=self.estimator)
                optimizer = optim.RMSprop([attack_module.perturbation],
                                          lr=self.args.learning_rate)

                # Iterate steps
                for k in range(self.args.n_iterations):

                    if self.estimator == "reparam":
                        self.attack_step_reparam(attack_module, optimizer, k,
                                                 target)
                    elif self.estimator == "score":
                        self.attack_step_score(attack_module, optimizer, k,
                                               target)
                    else:
                        raise Exception("No such estimator")

                # Evaluate the attack
                # Run full number of samples on perturbed input to obtain perturbed output
                with torch.no_grad():

                    if self.estimator == "deterministic":

                        perturbed_output = attack_module.forward_deterministic()
                    else:

                        perturbed_output = attack_module(
                            n_samples=self.args.samples)

                    norm_per_sample, distance_per_sample, loss_per_sample, norm, distance, loss = \
                        attack_module.attack_loss(attack_module.perturbation, perturbed_output, target)

                    # For each perturbation budget, keep the smallest-distance
                    # perturbation whose norm stays within the budget
                    numpy_norm = np.sqrt(
                        utils.convert_from_tensor(norm_per_sample))
                    numpy_distance = utils.convert_from_tensor(
                        distance_per_sample)
                    numpy_perturbation = utils.convert_from_tensor(
                        attack_module.perturbation.data)

                    for l in range(self.max_pert_len):
                        indexes_best_c = np.logical_and(
                            numpy_norm <= self.args.max_pert[l] - 0.00001,
                            numpy_distance < best_distance[mode][l])

                        best_perturbation[mode][l][:,indexes_best_c] = \
                            numpy_perturbation[:,indexes_best_c]
                        best_distance[mode][l,indexes_best_c] =\
                            numpy_distance[indexes_best_c]
                        best_c[mode][l, indexes_best_c] = c

            with torch.no_grad():
                if self.target_type == "binary":
                    percentage[mode] = []
                    for l in range(self.max_pert_len):

                        # Check if 95% confidence interval is in "buy" or "sell"
                        attack_module.perturbation.data = \
                            torch.tensor(best_perturbation[mode][l],
                                        device=attack_module.model.device).float()

                        if self.estimator == "deterministic":
                            perturbed_output = attack_module.forward_deterministic()
                            sem = 0.
                        else:
                            perturbed_output, sem = attack_module(
                                n_samples=self.args.samples, std=True)

                        metrics = self.compute_metrics(mode, perturbed_output,
                                                       sem, mean_output)
                        percentage[mode].append(metrics)
                else:

                    for l in range(self.max_pert_len):
                        # Run the model with the best perturbation for this budget and store its output
                        attack_module.perturbation.data = \
                            torch.tensor(best_perturbation[mode][l],
                                         device=attack_module.model.device).float()

                        if self.estimator == "deterministic":
                            out = attack_module.forward_deterministic()
                        else:
                            out = attack_module(n_samples=self.args.samples,
                                                std=False)
                        perturbed_outputs.append(out)

        if self.target_type == "binary":
            return best_c, best_perturbation, best_distance, percentage
        else:
            return perturbed_outputs
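
Across the loop over c, attack keeps, for every perturbation budget, the candidate with the smallest distance whose norm fits the budget, overwriting earlier winners when a later c does better. The selection rule on its own, with toy values:

import numpy as np

max_pert = [0.1, 0.5]                       # perturbation budgets
n_inputs = 3
best_distance = np.full((len(max_pert), n_inputs), np.inf)
best_c = np.zeros((len(max_pert), n_inputs))

for c, norm, distance in [(0.1, np.array([0.05, 0.4, 0.6]), np.array([3.0, 2.0, 1.0])),
                          (1.0, np.array([0.08, 0.3, 0.2]), np.array([2.5, 2.5, 0.5]))]:
    for l, budget in enumerate(max_pert):
        better = np.logical_and(norm <= budget, distance < best_distance[l])
        best_distance[l, better] = distance[better]
        best_c[l, better] = c

print(best_c)
print(best_distance)
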
    def attack(self, x, n_per_day):

        n_inputs = x.shape[1]

        final_perturbation = {}
        final_norm = {}
        final_distance = {}

        if self.args.log_batch:
            logs = {}

        for mode in self.modes:

            attack_module = AttackModule(self.model,
                                         self.args,
                                         self.c,
                                         x,
                                         batch_size=self.batch_size)

            target, _ = attack_module.generate_target(n_inputs,
                                                      self.mean_step_return,
                                                      mode, n_per_day)
            optimizer = optim.RMSprop([attack_module.perturbation],
                                      lr=self.args.learning_rate)

            if self.args.log_batch:
                log = np.zeros((self.log_batch_size, 3))
            else:
                log = None

            # Iterate steps
            for i in range(self.n_iterations):

                self.attack_step_reparam(attack_module,
                                         optimizer,
                                         i,
                                         target,
                                         log=log,
                                         batch_delta_log=self.batch_delta_log)

            # Evaluate the attack
            # Run full number of samples on perturbed input to obtain perturbed output
            with torch.no_grad():
                perturbed_output = attack_module(n_samples=self.args.samples)

                norm_per_sample, distance_per_sample, loss_per_sample, norm, distance, loss = \
                    attack_module.attack_loss(attack_module.perturbation, perturbed_output, target)

                final_perturbation[mode] = utils.convert_from_tensor(
                    attack_module.perturbation.detach())
                final_norm[mode] = utils.convert_from_tensor(norm_per_sample)
                final_distance[mode] = utils.convert_from_tensor(
                    distance_per_sample)

                if self.args.log_batch:
                    logs[mode] = log

        if self.args.log_batch:
            return final_perturbation, final_norm, final_distance, logs
        else:
            return final_perturbation, final_norm, final_distance
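
The exact loss is hidden behind attack_module.attack_loss / attack_utils.AttackLoss. A plausible Carlini-Wagner-style reading, written here purely as an assumption and not as the project's implementation, returns per-sample squared perturbation norms and squared output-target distances together with their batch means:

import torch


def attack_loss_sketch(perturbation, perturbed_output, target, c):
    """Hypothetical stand-in for the attack loss used above: squared L2
    perturbation norm plus c times the squared distance between the
    perturbed output and the target, per sample."""
    norm_per_sample = (perturbation ** 2).sum(dim=0)                      # (samples,)
    distance_per_sample = ((perturbed_output - target) ** 2).view(
        perturbed_output.shape[0], -1).sum(dim=1)                         # (samples,)
    loss_per_sample = norm_per_sample + c * distance_per_sample
    return (norm_per_sample, distance_per_sample, loss_per_sample,
            norm_per_sample.mean(), distance_per_sample.mean(), loss_per_sample.mean())


perturbation = 0.01 * torch.randn(20, 8)        # (time, samples)
perturbed_output = torch.randn(8, 1)
target = torch.randn(8, 1)
out = attack_loss_sketch(perturbation, perturbed_output, target, c=1.0)
print(out[0].shape, out[3].item())
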