Example #1
    def controlled_uniform_traffic(self):
        # total traffic volume, drawn uniformly up to 125% of self.capacity
        t = np.random.uniform(0, self.capacity*1.25)
        if self.prev_traffic is None:
            self.prev_traffic = np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)

        # choose, on average, one origin-destination entry to refresh
        # (each entry is 1 with probability 1/nodes_num**2, otherwise 0)
        dist = [1]
        dist += [0]*(self.nodes_num**2 - 1)
        ch = np.random.choice(dist, [self.nodes_num]*2)

        # keep the previous traffic where ch == 0 ...
        tt = np.multiply(self.prev_traffic, 1 - ch)

        # ... and draw fresh softmax-distributed traffic where ch == 1
        nt = np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)
        nt = np.multiply(nt, ch)

        self.prev_traffic = tt + nt

        return self.prev_traffic
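
This and the following examples all rely on a softmax helper (imported directly, or reached as helper.softmax / hp.softmax). A minimal, numerically stable sketch of what such a helper might look like is shown below; the exact signature and normalization axis used by each project are assumptions.

import numpy as np

def softmax(x, axis=None):
    # Sketch only: subtract the maximum before exponentiating for numerical
    # stability, then normalize so the result sums to 1 along `axis`.
    z = np.asarray(x, dtype=float)
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)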
Example #2
def forward_prop(Batch_Norm, param, x):
    w1, w2, w3, b1, b2, b3 = param['w1'], param['w2'], param['w3'], param[
        'b1'], param['b2'], param['b3']
    # input to hidden layer - pre-activation
    a1 = np.dot(x, w1) + b1

    if Batch_Norm:
        #send it to batch norm
        a1, param = BatchNorm.forward(a1, param, level=1)

    #hidden layer activation
    h1 = helper.sigmoid(a1)
    #hidden layer to hidden layer - pre-activation
    a2 = np.dot(h1, w2) + b2

    if Batch_Norm:
        #send it to batch norm
        a2, param = BatchNorm.forward(a2, param, level=2)

    #hidden layer activation
    h2 = helper.sigmoid(a2)
    #hidden layer to output - pre-activation
    a3 = np.dot(h2, w3) + b3

    if Batch_Norm:
        #send it to batch norm
        a3, param = BatchNorm.forward(a3, param, level=3)

    #output layer activation resulting in probability scores
    prob_scores = helper.softmax(a3)
    return prob_scores, h1, h2
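
A hypothetical way to call forward_prop above with batch normalization disabled; the layer sizes (784 -> 128 -> 64 -> 10), the random initialization, and the batch size are illustrative assumptions, not values from the original project.

import numpy as np

rng = np.random.default_rng(0)
param = {'w1': rng.normal(scale=0.01, size=(784, 128)), 'b1': np.zeros(128),
         'w2': rng.normal(scale=0.01, size=(128, 64)),  'b2': np.zeros(64),
         'w3': rng.normal(scale=0.01, size=(64, 10)),   'b3': np.zeros(10)}
x = rng.normal(size=(32, 784))  # a batch of 32 flattened inputs
prob_scores, h1, h2 = forward_prop(Batch_Norm=False, param=param, x=x)
# prob_scores has shape (32, 10); each row is a probability distribution.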
Example #3
def run_fuzzing(dataset_name,
                model,
                x_train,
                y_train,
                x_test,
                y_test,
                model_layer,
                folder_to_store,
                order_numbers=10):

    for order_number in range(0, order_numbers):

        file_path = '{}nc_index_{}.npy'.format(folder_to_store, order_number)

        # only perform fuzzing if the file does not exist
        if not os.path.exists(file_path):
            nc_index = {}
            nc_number = 0
            lower_bound = 3000 * order_number
            upper_bound = 3000 * (order_number + 1)

            if lower_bound > len(x_train): lower_bound = len(x_train)

            if upper_bound > len(x_train): upper_bound = len(x_train)

            for i in range(lower_bound, upper_bound):
                new_image = mutate(x_train[i], dataset_name)

                if i == 5000 * order_number + 1000 or i == 5000 * order_number + 3000:
                    print(
                        "-------------------------------------THIS IS {}-------------------------------------"
                        .format(i))
                if softmax(model.predict(np.expand_dims(
                        new_image, axis=0))).argmax(axis=-1) != softmax(
                            model.predict(np.expand_dims(
                                x_train[i], axis=0))).argmax(axis=-1):

                    nc_symbol = compare_nc(model, x_train, y_train, x_test,
                                           y_test, new_image, x_train[i],
                                           model_layer)

                    if nc_symbol == True:
                        nc_index[i] = new_image
                        nc_number += 1

            print(nc_number)
            np.save(file_path, nc_index)
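
run_fuzzing processes the training set in chunks of 3000 images per order_number, mutates each image, keeps the mutants whose softmax prediction changes and which improve neuron coverage (compare_nc), and saves each chunk's survivors to a .npy file. A hypothetical invocation could look like the following; the dataset name, model, data arrays, and storage folder are placeholders.

run_fuzzing(dataset_name='mnist',
            model=model,
            x_train=x_train, y_train=y_train,
            x_test=x_test, y_test=y_test,
            model_layer=len(model.layers) - 1,
            folder_to_store='fuzzing/mnist/',
            order_numbers=10)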
Example #4
 def __init__(self, distribution):
     """Create a new sequence distribution object."""
     if isinstance(distribution,
                   np.ndarray) and not (distribution.dtype == np.uint8):
         # right type!
         self.seq = helper.softmax(np.log(distribution))
     else:
         raise TypeError('Sequence is not an accepted type')
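
Example #4 (and the Meme constructor in Example #11) normalizes a distribution by taking the softmax of its log. The sketch below illustrates why that works, assuming a row-wise softmax over the four nucleotide columns; the axis choice is an assumption about helper.softmax.

import numpy as np

# softmax(log(p)) renormalizes a strictly positive array, because
# exp(log(p_i)) / sum_j exp(log(p_j)) = p_i / sum_j p_j.
p = np.array([[0.2, 0.2, 0.2, 0.4],
              [1.0, 1.0, 2.0, 4.0]])
logp = np.log(p)
renormalized = np.exp(logp) / np.exp(logp).sum(axis=1, keepdims=True)
assert np.allclose(renormalized, p / p.sum(axis=1, keepdims=True))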
Example #5
 def get_importance(self,
                    seq,
                    viz=False,
                    start=None,
                    end=None,
                    plot=False,
                    temp=.1):
     """Generate the gradient based importance of a sequence according to a given model.
     
     Arguments:
          seq -- the Sequence to run through the keras model.
          viz -- sequence logo of importance?
          start -- plot only past this nucleotide.
          end -- plot only to this nucleotide.
          plot -- generate a gain-loss plot?
     Outputs:
           diffs -- difference at each position to score.
           average_diffs -- base-by-base importance value.
           masked_diffs -- importance for bases in the original sequence.
     """
     score = self.get_activation(seq)
     mutant_preds = self.get_activation(seq.ngram_mutant_gen())
     #get the right shape
     mutant_preds = mutant_preds.reshape((-1, 4))[:len(seq.seq)]
     diffs = mutant_preds - score
     # we want the difference for each nucleotide at a position minus the average difference at that position
     average_diffs = list()
     for base_seq, base_preds in zip(seq.seq, mutant_preds):
         this_base = list()
         for idx in range(4):
             this_base.append(base_preds[idx] - np.average(base_preds))
         average_diffs.append(list(this_base))
     average_diffs = np.asarray(average_diffs)
     # masked by the actual base
     masked_diffs = (seq.seq * average_diffs)
     if plot:
         # plot the gain-loss curve
         plt.figure(figsize=(20, 2))
         plt.plot(np.amax(diffs, axis=1)[start:end])
         plt.plot(np.amin(diffs, axis=1)[start:end])
          plt.title('Prediction Difference for a Mutagenesis Scan')
         plt.ylabel('importance (difference)')
         plt.xlabel('nucleotide')
         plt.show()
     if viz:
         temp = temp
          #print('Prediction Difference')
          #viz_sequence.plot_weights(average_diffs[start:end])
          print('Masked average prediction difference')
         viz_sequence.plot_weights(masked_diffs[start:end])
         #print('Softmax prediction difference')
         #viz_sequence.plot_weights(helper.softmax(diffs[start:end]))
         print('Information Content of Softmax prediction difference')
         viz_sequence.plot_icweights(
             helper.softmax(diffs[start:end] /
                            (temp * self.get_activation(seq))))
     return diffs, average_diffs, masked_diffs
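
The inner loop of get_importance subtracts, at every position, the mean mutant prediction across the four bases. A vectorized restatement of that step, using made-up prediction values, is shown below.

import numpy as np

mutant_preds = np.array([[0.8, 0.6, 0.4, 0.2],   # one row per position,
                         [0.5, 0.5, 0.9, 0.1]])  # one column per base
# same as appending base_preds[idx] - np.average(base_preds) for each base
average_diffs = mutant_preds - mutant_preds.mean(axis=1, keepdims=True)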
Example #6
 def forward(self, X, W1, W2, b1, b2):
     '''
     Forward propagation function for the neural network
     '''
     p1 = X.dot(W1) + b1   # pre-activation values of the hidden layer
     Z1 = sigmoid(p1)      # sigmoid activations of the hidden layer
     p2 = Z1.dot(W2) + b2  # pre-activation values of the output layer
     pY = softmax(p2)      # final class probabilities from the output layer
     return pY, p1
Example #7
def forward_prop(param, x):
    w1, w2, b1, b2 = param['w1'], param['w2'], param['b1'], param['b2']
    # input to hidden layer - pre-activation
    a1 = np.dot(x, w1) + b1
    #hidden layer activation
    h1 = helper.sigmoid(a1)
    #h1 = helper.tanh(a1) #for tanh uncomment this
    #h1 = helper.relu_activation(a1)
    #input to output layer - pre-activation
    a2 = np.dot(h1, w2) + b2
    #output layer activation resulting in probability scores
    prob_scores = helper.softmax(a2)
    return prob_scores, h1
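
The activation helpers referenced in the comments above might be defined as follows; these definitions are assumptions about the helper module, not its actual contents.

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))  # logistic activation

def tanh(a):
    return np.tanh(a)                # hyperbolic tangent activation

def relu_activation(a):
    return np.maximum(a, 0.0)        # rectified linear activation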
Example #8
 def normal_traffic(self):
     t = np.random.normal(self.capacity/2, self.capacity/2)
     return np.asarray(t * softmax(np.random.randn(self.nodes_num, self.nodes_num))).clip(min=0.001)
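
normal_traffic spreads a Gaussian total volume over an N x N origin-destination matrix with a softmax and floors every entry at 0.001. A standalone sketch of the same idea, using scipy.special.softmax as a stand-in for the project's softmax and illustrative values for capacity and nodes_num:

import numpy as np
from scipy.special import softmax  # stand-in for the project's softmax helper

nodes_num, capacity = 4, 100.0
t = np.random.normal(capacity / 2, capacity / 2)  # total traffic volume
traffic = (t * softmax(np.random.randn(nodes_num, nodes_num))).clip(min=0.001)
# softmax over the whole matrix makes the entries sum to roughly t when t > 0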
Example #9
            model_layer = len(model.layers) - 1

            # load dataset
            x_train, y_train, x_test, y_test = load_data(dataset)

            for order_number in range(0, 10):
                nc_index = {}
                nc_number = 0
                for i in range(3000 * order_number, 3000 * (order_number + 1)):
                    new_image = mutate(x_train[i], dataset)

                    if i == 5000 * order_number + 1000 or i == 5000 * order_number + 3000:
                        print(
                            "-------------------------------------THIS IS {}-------------------------------------"
                            .format(i))
                    if softmax(model.predict(np.expand_dims(
                            new_image, axis=0))).argmax(axis=-1) != softmax(
                                model.predict(
                                    np.expand_dims(x_train[i],
                                                   axis=0))).argmax(axis=-1):

                        nc_symbol = compare_nc(model, x_train, y_train, x_test,
                                               y_test, new_image, x_train[i],
                                               model_layer)

                        if nc_symbol == True:
                            nc_index[i] = new_image
                            nc_number += 1

                print(nc_number)

                ### save data
Example #10
    def gumbel_dream(self,
                     seq,
                     dream_type,
                     temp=10,
                     layer_name='final_output',
                     filter_index=0,
                     meme_library=None,
                     num_iterations=20,
                     step=None,
                     constraint=None,
                     viz=False):
        """ Dream a sequence for the given number of steps employing the gumbel-softmax reparamterization trick.

        Arguments:
            seq -- SeqDist object to iterate over.
            dream_type -- type of dreaming to do. 
                standard: update is average gradient * step
                constrained: dream the rejection of this model against the other model.
        Keywords:
            temp -- for gumbel softmax.
            layer_name -- name of the layer to optimize.
            filter_index -- which of the neurons at this filter to optimize.
            meme_library -- memes to use if applicable (default is CTCF)
            num_iterations -- how many iterations to increment over.
            step -- default is 1/10th the initial maximum gradient
            constraint -- for constrained dreaming, the model to use for rejection.
            viz -- sequence logo of importance?
        Returns:
            dream_seq -- result of the iterations.
        """
        # dreaming won't work off of true zero probabilities - if these exist we must add a pseudocount
        if np.count_nonzero(seq.seq) != np.size(seq.seq):
            print(
                'Discrete Sequence passed - converting to a distribution via pseudocount'
            )
            dream_seq = sequence.SeqDist(helper.softmax(3 * seq.seq + 1))
        else:
            dream_seq = sequence.SeqDist(seq.seq)

        # get a gradient grabbing op
        #input underlying distribution as (batch_size, 256, 4) duplications of the sequence
        dist = tf.placeholder(shape=((256, 4)),
                              name='distribution',
                              dtype=tf.float32)
        logits_dist = tf.reshape(dist, [-1, 4])
        # sample and reshape back (shape=(batch_size, 256, 4))
        # set hard=True for ST Gumbel-Softmax
        sampled_seq = tf.reshape(
            train_TFmodel.gumbel_softmax(logits_dist, temp, hard=True),
            [-1, 256, 4])
        sampled_seq = self.model.input
        if layer_name == 'final_output':
            loss = self.model.output
        else:
            max_by_direction = Lambda(lambda x: K.maximum(
                K.max(x[:x.shape[0] // 2, :, :], axis=1),
                K.max(x[x.shape[0] // 2:, ::-1, :], axis=1)),
                                      name='stackmax',
                                      output_shape=lambda s: (s[0] // 2, 1))
            layer_output = max_by_direction(self.layer_dict[layer_name].output)
            loss = layer_output[:, filter_index]  # each batch and nucleotide at this neuron
        # compute the gradient of the input seq w.r.t. this loss and average to get the update (sampling already weights for probability)
        if dream_type == 'constrained':
            sampled_seq = constraint.model.input
            pwm_loss = constraint.output
            grads = K.gradients(loss, sampled_seq)[0]
            pwms = K.gradients(pwm_loss, sampled_seq)[0]
            update = K.mean(helper.rejection(grads, pwms), axis=0)
        else:
            update = K.mean(K.gradients(loss, sampled_seq)[0], axis=0)
        #get a function
        update_op = K.function([sampled_seq, K.learning_phase()], [update])

        #find a step size
        if step is None:
            step = 1 / (np.amax(update_op([[dream_seq.seq] * 32, 0])[0]))
            print('Step ' + str(step))
        # print the initial sequence
        if viz:
            print('Initial Sequence')
            seq.logo()
            print('Model Prediction: ' + str(
                self.model.predict(
                    train_TFmodel.blank_batch(dream_seq.discrete_seq()))[0][0])
                  )
            self.get_importance(dream_seq, viz=True)
            print('PWM score: ' + str(dream_seq.find_pwm(viz=True)[2]))

        #iterate and dream
        for i in range(num_iterations):
            update = update_op([[dream_seq.seq] * 32, 0])[0]
            if dream_type == 'standard':
                dream_seq.seq = helper.softmax(
                    np.log(dream_seq.seq) + update * step)
            elif dream_type == 'adverse':
                dream_seq.seq = helper.softmax(
                    np.log(dream_seq.seq) + update * step - 1)
            elif dream_type == 'blocked':
                meme, position, _ = dream_seq.find_pwm(
                    meme_library=meme_library)
                update[position:position + meme.seq.shape[0]] = 0
                dream_seq.seq = helper.softmax(
                    np.log(dream_seq.seq) + update * step)
            if i % (num_iterations // 4) == 0 and viz:
                print('Sequence after ' + str(i) + ' iterations')
                viz_sequence.plot_icweights(dream_seq.seq)

        #print the final sequence
        if viz:
            print('Final sequence')
            dream_seq.logo()
            print('Model Prediction: ' + str(
                self.model.predict(
                    train_TFmodel.blank_batch(dream_seq.discrete_seq()))[0][0])
                  )
            self.get_importance(dream_seq, viz=True)
            print('PWM score: ' + str(dream_seq.find_pwm(viz=True)[2]))
        return dream_seq
Example #11
 def __init__(self, dist, pwm):
     """Create a new Meme object."""
     self.seq = helper.softmax(np.log(dist))
     self.pwm = pwm
Example #12
    def dream(self,
              seq,
              dream_type='standard',
              iterate_op=None,
              layer_name='final_output',
              filter_index=0,
              meme_library=None,
              num_iterations=20,
              step=None,
              viz=False):
        """Dream a sequence for the given number of steps.
         
        Arguments:
            seq -- SeqDist object to iterate over.
        Keywords:
            dream_type -- type of dreaming to do
                standard: update is average gradient @ base * p(base) * step
                adversarial: update is standard - 1/10 * step
                blocked: dream only outside the pwm region (should I allow the max pwm to move around? doesn't currently.)
                constrained: dream orthogonal to the pwm score (DOESN'T WORK)
                strict: gradients only apply to a base if that base was in the discrete sequence chosen. 
            iterate_op -- operation to get the update step, default is maximize output. 
            layer_name -- name of the layer to optimize.
            filter_index -- which of the neurons at this filter to optimize.
            meme_library -- memes to use if applicable (default is CTCF)
            num_iterations -- how many iterations to increment over.
            step -- default is 1/10th the initial maximum gradient
            viz -- sequence logo of importance?
        Returns:
            dream_seq -- result of the iterations. 
        """
        # get an iterate operation
        if iterate_op is None:
            iterate_op = self.build_iterate(layer_name=layer_name,
                                            filter_index=filter_index)
        # dreaming won't work off of true zero probabilities - if these exist we must add a pseudocount
        if np.count_nonzero(seq.seq) != np.size(seq.seq):
            print(
                'Discrete Sequence passed - converting to a distribution via pseudocount'
            )
            dream_seq = sequence.SeqDist(helper.softmax(3 * seq.seq + 1))
        else:
            dream_seq = sequence.SeqDist(seq.seq)
        # find the meme position
        meme, position, _ = seq.find_pwm(meme_library=meme_library)
        pwm_activation = seq.run_pwm(meme=meme, position=position)
        #print the initial sequence
        if viz:
            print('Initial sequence')
            viz_sequence.plot_icweights(dream_seq.seq)
            self.get_importance(dream_seq, viz=viz)
        # find a good step size
        batch_gen = train_TFmodel.filled_batch(dream_seq.discrete_gen())
        batch = next(batch_gen)
        update_grads = iterate_op([batch, 0])[0]
        if step is None:
            step = 10 / np.amax(update_grads)
            print('step: ' + str(step))
        # apply the updates
        for i in range(num_iterations):
            batch = next(batch_gen)
            update_grads = iterate_op([batch, 0])[0]
            # figure out the type of update to do
            if dream_type == 'adversarial':
                update = np.average(update_grads,
                                    axis=0) * dream_seq.seq * step - .1 * step
            elif dream_type == 'blocked':
                update = np.average(update_grads,
                                    axis=0) * dream_seq.seq * step
                update[position:position + meme.seq.shape[0]] = 0
            elif dream_type == 'constrained':
                pwm_activation = seq.run_pwm(meme=meme, position=position)
                update = helper.rejection(
                    np.average(update_grads, axis=0) * dream_seq.seq,
                    pwm_activation) * step
            elif dream_type == 'strict':
                # weight the gradients by the sampled one-hot batch so only bases
                # present in the discrete sequences receive an update
                update = np.average(update_grads, axis=0,
                                    weights=batch) * dream_seq.seq * step

            elif dream_type == 'standard':
                update = np.average(update_grads,
                                    axis=0) * dream_seq.seq * step
            else:
                print('Unrecognized dream type passed. Setting to standard.')
                update = np.average(update_grads,
                                    axis=0) * dream_seq.seq * step
            # we apply the update in log space so a zero update won't change anything
            dream_seq = np.log(dream_seq.seq) + update
            dream_seq = sequence.SeqDist(helper.softmax(dream_seq))
            #print intermediate sequences
            if i % (num_iterations // 4) == 0 and viz:
                print('Sequence after ' + str(i) + ' iterations')
                viz_sequence.plot_icweights(dream_seq.seq)
        #print the final sequence
        if viz:
            print('Final sequence')
            viz_sequence.plot_icweights(dream_seq.seq)
            self.get_importance(dream_seq, viz=viz)
        return dream_seq
Example #13
 def ou_traffic(self):
     t = self.total_ou.evolve()[0]
     nt = t * softmax(self.nodes_ou.evolve())
     i = np.split(nt, self.nodes_num)
     return np.vstack(i).clip(min=0.001)
Example #14
def cycle(T: int):
    assert T > 0

    # Step 1. Load the current model M_i
    current_model_path = "{}{}/{}/{}/{}.h5".format(THIS_MODEL_DIR,
                                                   dataset_name, model_name,
                                                   is_improve, str(T - 1))
    current_model = load_model(current_model_path)

    # Step 2. According to the current M_i and dataset, generate examples T_i
    ## Load the current dataset we have
    x_train, y_train, x_test, y_test = load_data(dataset_name)
    for i in range(T - 1):
        index = np.load('fuzzing/nc_index_{}.npy'.format(i),
                        allow_pickle=True).item()
        for y, x in index.items():
            x_train = np.concatenate((x_train, np.expand_dims(x, axis=0)),
                                     axis=0)
            y_train = np.concatenate(
                (y_train, np.expand_dims(y_train[y], axis=0)), axis=0)

    ## Generate new examples
    nc_index = {}
    nc_number = 0
    for i in range(5000 * (T - 1), 5000 * (T)):
        new_image = mutate(x_train[i])
        if i % 100 == 0:
            print('.', end='')  # progress indicator every 100 images
        if softmax(current_model.predict(np.expand_dims(
                new_image, axis=0))).argmax(axis=-1) != softmax(
                    current_model.predict(np.expand_dims(
                        x_train[i], axis=0))).argmax(axis=-1):
            # find an adversarial example
            nc_symbol = compare_nc(current_model, x_train, y_train, x_test,
                                   y_test, new_image, x_train[i], model_layer)
            if nc_symbol and improve_coverage:
                # new image can cover more neurons, and we want such improvements
                nc_index[i] = new_image
                nc_number += 1

            if (not improve_coverage) and (not nc_symbol):
                # new image CANNOT cover more neurons, and we want examples cannot improve coverage
                nc_index[i] = new_image
                nc_number += 1

    print(nc_number)
    data_folder = 'fuzzing/{}/{}/{}'.format(dataset_name, model_name,
                                            is_improve)
    os.makedirs(data_folder, exist_ok=True)
    np.save(os.path.join(data_folder, 'nc_index_{}.npy'.format(T)), nc_index)

    # Step 3. Retrain M_i against T_i, to obtain M_{i+1}
    ## Augment the newly generated examples into the training data

    index = np.load(os.path.join(data_folder, 'nc_index_{}.npy'.format(T)),
                    allow_pickle=True).item()
    for y, x in index.items():
        x_train = np.concatenate((x_train, np.expand_dims(x, axis=0)), axis=0)
        y_train = np.concatenate((y_train, np.expand_dims(y_train[y], axis=0)),
                                 axis=0)

    ## Retrain the model
    retrained_model = retrain(current_model,
                              x_train,
                              y_train,
                              x_test,
                              y_test,
                              batch_size=128,
                              epochs=5)
    new_model_path = "{}{}/{}/{}/{}.h5".format(THIS_MODEL_DIR, dataset_name,
                                               model_name, is_improve, str(T))
    retrained_model.save(new_model_path)

    # Step 4. Evaluate the current model
    ## Evaluate coverage
    evaluate_coverage(retrained_model, l, T, x_train, y_train, x_test, y_test)

    ## Evaluate robustness
    store_path = 'new_test/{}/{}'.format(dataset_name, model_name)
    x_test_new = np.load(os.path.join(store_path, 'x_test_new.npy'))
    evaluate_robustness(T, retrained_model, x_test, y_test, x_test_new)

    print("Done")
Example #15
 def upd_env_W(self, vector):
     self.env_W = np.asarray(softmax(vector))
Example #16
def cycle(T: int):

    # Step 1. Load the current model M_i

    current_model_path = "{}{}/{}/{}/{}.h5".format(THIS_MODEL_DIR,
                                                   dataset_name, model_name,
                                                   is_improve, str(0))
    # else:
    #     current_model_path = "{}{}/{}/{}/{}.h5".format(THIS_MODEL_DIR, dataset_name, model_name, is_improve, str(T-1))
    current_model = load_model(current_model_path)

    # Step 2. According to the current M_i and dataset, generate examples T_i
    ## Load the current dataset we have
    x_train, y_train, x_test, y_test = load_data(dataset_name)

    if not os.path.exists(
            os.path.join('new_test/{}/{}'.format(dataset_name, model_name),
                         'x_test_new.npy')):
        print("Generate test set")

        new_images = []
        for i in tqdm(range(len(x_test)), desc="transformation ......"):
            new_images.append(mutate(x_test[i]))

        nc_index = {}
        nc_number = 0
        for i in tqdm(range(0, len(x_test), 500), desc="Total progress:"):
            for index, (pred_new, pred_old) in enumerate(
                    zip(
                        softmax(model.predict(np.array(
                            new_images[i:i + 500]))).argmax(axis=-1),
                        softmax(model.predict(x_test[i:i +
                                                     500])).argmax(axis=-1))):
                nc_symbol = compare_nc(model, x_train, y_train, x_test, y_test,
                                       new_images[i + index],
                                       x_test[i + index], model_layer)
                if nc_symbol == True:
                    nc_index[i + index] = new_images[i + index]
                    nc_number += 1

        print("Log: new image can cover more neurons: {}".format(nc_number))
        store_path = 'new_test/{}/{}'.format(dataset_name, model_name)
        os.makedirs(store_path, exist_ok=True)
        for y, x in nc_index.items():
            x_test[y] = x
        np.save(os.path.join(store_path, 'x_test_new.npy'), x_test)

    data_folder = 'fuzzing/{}/{}/{}'.format(dataset_name, model_name,
                                            is_improve)
    os.makedirs(data_folder, exist_ok=True)
    if T != 0:
        if not os.path.exists(os.path.join(data_folder, "new_images.npy")):
            print("Log: Start do transformation in images")
            new_images = []
            for i in tqdm(range(len(x_train))):
                new_images.append(mutate(x_train[i]))
            np.save(os.path.join(data_folder, "new_images.npy"), new_images)
        else:
            print("Log: Load mutantions.")
            new_images = np.load(os.path.join(data_folder, "new_images.npy"))

        for i in range(1, T):
            index = np.load('fuzzing/{}/{}/{}/nc_index_{}.npy'.format(
                dataset_name, model_name, is_improve, i),
                            allow_pickle=True).item()
            for y, x in index.items():
                x_train = np.concatenate((x_train, np.expand_dims(x, axis=0)),
                                         axis=0)
                y_train = np.concatenate(
                    (y_train, np.expand_dims(y_train[y], axis=0)), axis=0)

        if not os.path.exists(
                os.path.join(data_folder, 'nc_index_{}.npy'.format(T))):
            ## Generate new examples
            nc_index = {}
            nc_number = 0
            for i in tqdm(range(5000 * (T - 1), 5000 * (T), 500),
                          desc="Total progress:"):
                for index, (pred_new, pred_old) in enumerate(
                        zip(
                            softmax(
                                current_model.predict(
                                    np.array(new_images[i:i + 500]))).argmax(
                                        axis=-1),
                            softmax(current_model.predict(
                                x_train[i:i + 500])).argmax(axis=-1))):
                    # find an adversarial example
                    if pred_new != pred_old:
                        nc_symbol = compare_nc(current_model, x_train, y_train,
                                               x_test, y_test,
                                               new_images[i + index],
                                               x_train[i + index], model_layer)
                        if nc_symbol and improve_coverage:
                            # new image can cover more neurons, and we want such improvements
                            nc_index[i + index] = new_images[i + index]
                            nc_number += 1

                        if (not improve_coverage) and (not nc_symbol):
                            # new image CANNOT cover more neurons, and we want examples cannot improve coverage
                            nc_index[i + index] = new_images[i + index]
                            nc_number += 1

            print("Log: new image can/cannot cover more neurons: {}".format(
                nc_number))

            np.save(os.path.join(data_folder, 'nc_index_{}.npy'.format(T)),
                    nc_index)

        # Step 3. Retrain M_i against T_i, to obtain M_{i+1}
        ## Augment the newly generated examples into the training data

        index = np.load(os.path.join(data_folder, 'nc_index_{}.npy'.format(T)),
                        allow_pickle=True).item()
        for y, x in index.items():
            x_train = np.concatenate((x_train, np.expand_dims(x, axis=0)),
                                     axis=0)
            y_train = np.concatenate(
                (y_train, np.expand_dims(y_train[y], axis=0)), axis=0)

    # Step 4. Evaluate the current model
    ## Evaluate coverage
    print(x_train.shape)
    print("\nEvaluate coverage ......")
    evaluate_coverage(current_model, l, T, x_train, y_train, x_test, y_test)

    ## Retrain the model
    if T != 0:
        retrained_model = retrain(current_model,
                                  x_train,
                                  y_train,
                                  x_test,
                                  y_test,
                                  batch_size=128,
                                  epochs=5)
        new_model_path = "{}{}/{}/{}/{}.h5".format(THIS_MODEL_DIR,
                                                   dataset_name, model_name,
                                                   is_improve, str(T))
        retrained_model.save(new_model_path)

    ## Evaluate robustness
    print("\nEvaluate robustness ......")
    store_path = 'new_test/{}/{}'.format(dataset_name, model_name)
    x_test_new = np.load(os.path.join(store_path, 'x_test_new.npy'),
                         allow_pickle=True)
    evaluate_robustness(T, current_model, x_test, y_test, x_test_new)

    print("Done\n")
Example #17
def test_softmax(softmax_input):
    from helper import softmax
    res = softmax(softmax_input)

    return res
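
A hypothetical check of test_softmax on a small vector; the expectation that the result is non-negative and sums to one assumes a standard softmax implementation in the helper module.

import numpy as np

res = test_softmax(np.array([1.0, 2.0, 3.0]))
assert np.all(res >= 0) and np.isclose(np.sum(res), 1.0)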
Example #18
            if lower_bound > len(new_images): lower_bound = len(new_images)

            if upper_bound > len(new_images): upper_bound = len(new_images)

            step = int((upper_bound - lower_bound) / 10)
            for i in tqdm(range(lower_bound, upper_bound, step),
                          desc="Total progress:"):

                left_idx = i
                right_idx = min(i + step, upper_bound)

                for index, (pred_new, pred_old) in enumerate(
                        zip(
                            softmax(
                                model.predict(
                                    np.array(new_images[left_idx:right_idx]))).
                            argmax(axis=-1),
                            softmax(model.predict(
                                x_test[left_idx:right_idx])).argmax(axis=-1))):
                    nc_symbol = compare_nc(model, x_train, y_train, x_test,
                                           y_test, new_images[i + index],
                                           x_test[i + index], model_layer)
                    if nc_symbol == True:
                        nc_index[i + index] = new_images[i + index]
                        nc_number += 1

            print(
                "Log: new image can cover more neurons: {}".format(nc_number))
            np.save(nc_index_path, nc_index)

    for order_number in range(2):
Example #19
 def uniform_traffic(self):
     t = np.random.uniform(0, self.capacity*1.25)
     return np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)
Example #20
    def _propagate(self, image, explore='none', label=None):
        """ 
		Propagates a single image through the network and return its classification along with activation of neurons in the network. 

		Args:
			images (numpy array): 2D input image to propagate
			explore (str, optional): determines in which layer to add exploration noise; correct values are 'none', 'conv', 'feedf'
			label (int, optional): label of the current image

		returns:
			(int): classifcation of the network
			(numpy array): input to the convolutional filters
			(numpy array): activation of the convolutional filters
			(numpy array): activation of the subsampling layer
			(numpy array): activation of the feedforward layer
			(numpy array): activation of the classification layer *without* addition of noise for exploration
			(numpy array): activation of the classification layer *with* addition of noise for exploration

		"""

        #get input to the convolutional filter
        conv_input = np.zeros((self.conv_neuron_num, self.conv_filter_side**2))
        conv_input = hp.get_conv_input(image, conv_input,
                                       self.conv_filter_side)
        conv_input = hp.normalize_numba(conv_input, self.A)

        #activate convolutional feature maps
        conv_activ = hp.propagate_layerwise(conv_input, self.conv_W, SM=False)
        if explore == 'conv' or explore == 'both':
            conv_activ_noise = conv_activ + np.random.normal(
                0,
                np.std(conv_activ) * self.noise_explore_conv,
                np.shape(conv_activ))
            conv_activ_noise = hp.softmax(conv_activ_noise, t=self.t_conv)
            #subsample feature maps
            subs_activ_noise = hp.subsample(conv_activ_noise,
                                            self.conv_map_side,
                                            self.conv_map_num,
                                            self.subs_map_side,
                                            self.subs_stride)

        #subsample feature maps
        conv_activ = hp.softmax(conv_activ,
                                t=self.t_conv)  ###<- softmax before pooling
        subs_activ = hp.subsample(conv_activ, self.conv_map_side,
                                  self.conv_map_num, self.subs_map_side,
                                  self.subs_stride)
        # conv_activ = hp.softmax(conv_activ, t=self.t_conv) ###<- softmax after pooling

        #activate feedforward layer
        feedf_activ = hp.propagate_layerwise(subs_activ,
                                             self.feedf_W,
                                             SM=False)

        #add exploration
        if explore == 'feedf':
            feedf_activ_noise = feedf_activ + np.random.normal(
                0,
                np.std(feedf_activ) * self.noise_explore_feedf,
                np.shape(feedf_activ))
        elif explore == 'conv' or explore == 'both':
            feedf_activ_noise = hp.propagate_layerwise(subs_activ_noise,
                                                       self.feedf_W,
                                                       SM=False)
        if explore == 'both':
            feedf_activ_noise = feedf_activ_noise + np.random.normal(
                0,
                np.std(feedf_activ) * self.noise_explore_feedf,
                np.shape(feedf_activ))
        if explore == 'feedf' or explore == 'conv' or explore == 'both':
            feedf_activ_noise = hp.softmax(feedf_activ_noise, t=self.t_feedf)
            if self.classifier == 'neural_dopa':
                class_activ_noise = hp.propagate_layerwise(feedf_activ_noise,
                                                           self.class_W,
                                                           SM=True,
                                                           t=0.001)
            elif self.classifier == 'neural_prob':
                class_activ_noise = np.dot(feedf_activ_noise, self.class_W)

        feedf_activ = hp.softmax(feedf_activ, t=self.t_feedf)

        #activate classification layer
        if self.classifier == 'neural_dopa':
            class_activ = hp.propagate_layerwise(feedf_activ,
                                                 self.class_W,
                                                 SM=True,
                                                 t=0.001)
        elif self.classifier == 'neural_prob':
            class_activ = np.dot(feedf_activ, self.class_W)

        #save activation of feedforward layer for computation of output weights
        if label is not None:
            self._feedf_activ_all = np.roll(self._feedf_activ_all, 1, axis=0)
            self._feedf_activ_all[0, :] = feedf_activ
            self._labels_all = np.roll(self._labels_all, 1)
            self._labels_all[0] = label

        if explore == 'none':
            return np.argmax(
                class_activ
            ), conv_input, conv_activ, subs_activ, feedf_activ, class_activ, class_activ
        elif explore == 'feedf':
            return np.argmax(
                class_activ
            ), conv_input, conv_activ, subs_activ, feedf_activ_noise, class_activ, class_activ_noise
        elif explore == 'conv' or explore == 'both':
            return np.argmax(
                class_activ
            ), conv_input, conv_activ_noise, subs_activ_noise, feedf_activ_noise, class_activ, class_activ_noise