Example #1
    def extracted_sample(self, directory, clip, file_type):
        """Load one clip and return its label bytes, feature matrix, and shape."""
        clip_path = '{0}/{1}'.format(directory, clip)
        print(clip_path)
        # The class index is encoded in the directory name: the text between
        # "/0" and the first "-" (e.g. ".../012-dog" -> "12").
        clip_category = directory.split("/0")[1].split("-")[0].strip()
        clip_data = Clip(clip_path, file_type).get_feature_vector()
        rows, cols = clip_data.shape
        clip_label = get_label(int(clip_category), self.number_of_class).tostring()
        return clip_label, clip_data, rows, cols
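The repo's get_label helper is not shown. Given the call above (an integer class index plus a class count, with .tostring() serializing the result), it plausibly returns a one-hot NumPy vector; a minimal sketch under that assumption:

import numpy as np

def get_label(class_index, number_of_classes):
    # Hypothetical reconstruction: one-hot encode the class index so the
    # caller can serialize it with .tostring() / .tobytes().
    label = np.zeros(number_of_classes, dtype=np.float32)
    label[class_index] = 1.0
    return label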
Example #2
    def process_signal(self):
        """Fan the input buffer out to worker threads, then write and publish."""
        self.counter += 1
        self.output_buffer = np.zeros([self.input_buffer.shape[0], self.feature_vector_size])

        # Each PreProcessor thread fills its own rows of the shared output buffer.
        threads = []
        for thread_id in range(self.number_of_threads):
            thread = PreProcessor(thread_id, self.input_buffer, self.output_buffer, config=self.config)
            thread.start()
            threads.append(thread)
        for t in threads:
            t.join()
        # with open(self.train_dir + "/feature_vectors.csv", 'a') as f:
        #     np.savetxt(f, self.output_buffer, delimiter=',', fmt='%.18e')

        # Plot the flattened features, wrap the image as a serialized sample,
        # and write it to the record file.
        clip_label = get_label(1, self.number_of_class)
        clip_filename = draw_sample_plot_and_save(self.output_buffer.flatten(), "/channel", self.thread_id, self.config)
        sample = create_sample_from_image(clip_filename, clip_label, self.config)
        # sample = create_sample_from_data(self.output_buffer.flatten(), class_label)
        self.writer.write(sample.SerializeToString())

        # Publish the raw and preprocessed buffers as JSON.
        self.send_noise_data(json.dumps(self.input_buffer.tolist()))
        self.send_preprocessed_data(json.dumps(self.output_buffer.tolist()))
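PreProcessor's implementation is not shown; for the shared output_buffer to be safe without locks, each thread presumably writes a disjoint set of rows. A minimal sketch of that pattern, with hypothetical names:

import threading
import numpy as np

class RowWorker(threading.Thread):
    # Hypothetical stand-in for PreProcessor: thread k handles rows
    # k, k + n_threads, k + 2*n_threads, ..., so writes never overlap.
    def __init__(self, thread_id, src, dst, n_threads):
        super().__init__()
        self.rows = range(thread_id, src.shape[0], n_threads)
        self.src, self.dst = src, dst

    def run(self):
        for r in self.rows:
            self.dst[r] = self.src[r] * 2.0  # placeholder feature transform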
Example #3
def train(model_name,
          backbone,
          train_img,
          train_annot,
          shuffle=False,
          input_shape=(None, None, 3),
          image_format='channels_last',
          label_path=None,
          verify_dataset=True,
          checkpoint_path=None,
          epochs=1,
          batch_size=2,
          validate=False,
          val_img=None,
          val_annot=None,
          val_shuffle=False,
          val_batch_size=1,
          optimizer_name='adam',
          loss_name='categorical_crossentropy',
          data_augment=False,
          load_weights=None,
          resume_checkpoint=False):
    
    classes = utils.get_label(label_path)
    n_classes = len(classes[0])

    model = model_from_name.get_model(model_name)(n_classes=n_classes,
                                                  backbone=backbone,
                                                  input_shape=input_shape,
                                                  image_format=image_format)
    metrics = [tf.keras.metrics.MeanIoU(num_classes=n_classes)]
    model.compile(optimizer=optimizer_name, loss=loss_name, metrics=metrics)
    model.summary()
    tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)

    if checkpoint_path is None:
        # Fall back to ./checkpoint when no path is given.
        checkpoint_path = os.path.join(os.getcwd(), "checkpoint")
        if not os.path.isdir(checkpoint_path):
            print("creating checkpoint folder:", checkpoint_path)
            os.mkdir(checkpoint_path)

    if resume_checkpoint:
        last_checkpoint = find_checkpoint(checkpoint_path)
        print("Loading weights from the latest checkpoint:", last_checkpoint)
        model.load_weights(last_checkpoint)
    
    if verify_dataset:
        assert utils.verify_dataset(train_img, train_annot)

    train_dataset = Dataset(
        train_img,
        train_annot,
        classes,
        preprocessing=utils.preprocessing
        # resize=(384, 512),
        # resample='bilinear'
    )
    train_dataloader = Dataloader(train_dataset, batch_size=batch_size, shuffle=shuffle)

    if validate:
        if verify_dataset:
            assert utils.verify_dataset(val_img, val_annot)
        valid_dataset = Dataset(
            val_img,
            val_annot,
            classes,
            preprocessing=utils.preprocessing
            # resize=(384, 512),
            # resample='bilinear'
        )
        valid_dataloader = Dataloader(valid_dataset, batch_size=val_batch_size, shuffle=val_shuffle)
    
    output_checkpoint = os.path.join(checkpoint_path, "model-{epoch:04d}.h5")
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(output_checkpoint),
        tf.keras.callbacks.TensorBoard()
    ]

    if validate:
        history = model.fit(
            train_dataloader,
            epochs=epochs,
            callbacks=callbacks,
            steps_per_epoch=len(train_dataloader),
            validation_data=valid_dataloader,
            validation_steps=len(valid_dataloader),
            use_multiprocessing=False
        )
    else:
        history = model.fit(
            train_dataloader,
            epochs=epochs,
            callbacks=callbacks,
            steps_per_epoch=len(train_dataloader),
            use_multiprocessing=True
        )
    return history
Example #4
max_iter = 50
lambda_ = 3.

x_adv, r, pred_label, fool_label, loops = sparsefool(im,
                                                     net,
                                                     lb,
                                                     ub,
                                                     lambda_,
                                                     max_iter,
                                                     device=device)

#####################
# Visualize results #
#####################
with open('synset_words.txt', 'r') as f:
    labels = f.read().split('\n')
str_label_pred = get_label(labels[int(pred_label)].split(',')[0])
str_label_fool = get_label(labels[int(fool_label)].split(',')[0])

fig, axes = plt.subplots(1, 3)

axes[0].set_title(str_label_pred)
axes[1].set_title("%d pixel(s)" % nnz_pixels(r.cpu().numpy().squeeze()))
axes[2].set_title(str_label_fool)

axes[0].imshow(im_orig)
axes[1].imshow(inv_tf_pert(r.cpu().numpy().squeeze()), cmap='gray')
axes[2].imshow(inv_tf(x_adv.cpu().numpy().squeeze(), mean, std))

axes[0].axis('off')
axes[1].axis('off')
axes[2].axis('off')
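nnz_pixels is not defined in this snippet; a plausible reconstruction (an assumption, consistent with the "pixel(s)" title above) counts the pixels that the perturbation r touches in at least one channel:

import numpy as np

def nnz_pixels(arr):
    # arr has shape (C, H, W); a pixel counts once if any channel is nonzero.
    return int(np.count_nonzero(np.abs(arr).sum(axis=0)))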
Example #5
    def build_captcha(self,
                      nb_train,
                      nb_val,
                      im_w,
                      im_h,
                      rdNoise=False,
                      noise=0.0,
                      fix_len=True,
                      min_str_len=6,
                      max_str_len=8,
                      rdFont=False):
        """Render nb_train training and nb_val validation captcha samples."""

        font = self.font_dir + "/Raleway-Regular.ttf"

        train_X = []
        train_Y = []
        train_Y_len = []
        split_tr = nb_train // 11                 # noise-schedule bucket size
        split_char = nb_train // list_chars_len   # first-letter bucket size
        for i in range(nb_train):
            # Choose the leading character by sample bucket, so each entry in
            # list_chars leads a roughly equal share of the training set.
            first_letter = ""
            for indx in range(0, list_chars_len):
                if i >= indx * split_char:
                    first_letter = "".join(list_chars[indx])
            rndLetters = first_letter

            # Note: str.join uses rndLetters (the first letter) as a
            # separator, interleaving it between the random characters.
            if fix_len:
                rndLetters = rndLetters.join(
                    choice(list_chars) for _ in range(min_str_len))
            else:
                rndLetters = rndLetters.join(
                    choice(list_chars)
                    for _ in range(rdlen(min_str_len, max_str_len)))

            # The first nb_val - 5 samples are rendered blank.
            if i < nb_val - 5:
                rndLetters = " "

            if rdFont:
                font = randFont(self.font_dir)

            if rdNoise:
                noise = randNoise()
            else:
                # Ramp the noise level up across ten buckets of samples.
                noise = 0.1
                for tem in range(0, 10):
                    if i >= tem * split_tr:
                        noise = noise * tem

            img_dat = next_img(rndLetters=rndLetters,
                               font=font,
                               im_w=im_w,
                               im_h=im_h,
                               noise=noise)

            train_Y.append(get_label(rndLetters))
            train_Y_len.append(len(rndLetters))
            train_X.append(img_dat)

        self.train_X = np.array(train_X)
        self.train_Y = np.array(train_Y)
        self.train_Y_len = np.array(train_Y_len)

        # The validation set is built the same way, with strings one
        # character shorter.
        val_X = []
        val_Y = []
        val_Y_len = []
        split_val = nb_val // 11
        split_char = nb_val // list_chars_len
        for i in range(nb_val):

            first_letter = ""
            for indx in range(0, list_chars_len):
                if i >= indx * split_char:
                    first_letter = "".join(list_chars[indx])

            rndLetters = first_letter

            if fix_len:
                rndLetters = rndLetters.join(
                    choice(list_chars) for _ in range(min_str_len - 1))
            else:
                rndLetters = rndLetters.join(
                    choice(list_chars)
                    for _ in range(rdlen(min_str_len - 1, max_str_len - 1)))

            if i < nb_val - 5:
                rndLetters = " "

            if rdNoise:
                noise = randNoise()
            else:
                noise = 0.1
                for tem in range(0, 10):
                    if i >= tem * split_val:
                        noise = noise * tem

            if rdFont:
                font = randFont(self.font_dir)

            img_dat = next_img(rndLetters=rndLetters,
                               font=font,
                               im_w=im_w,
                               im_h=im_h,
                               noise=noise)

            val_Y.append(get_label(rndLetters))
            val_Y_len.append(len(rndLetters))
            val_X.append(img_dat)

        self.val_X = np.array(val_X)
        self.val_Y = np.array(val_Y)
        self.val_Y_len = np.array(val_Y_len)
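Hypothetical usage, assuming the enclosing class has been instantiated as builder with a valid font_dir:

builder.build_captcha(nb_train=10000, nb_val=1000,
                      im_w=160, im_h=60,        # illustrative dimensions
                      rdNoise=True, rdFont=False)
print(builder.train_X.shape, builder.val_X.shape)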
Example #6
    def __init__(self,
                 model_name_or_path,
                 task,
                 no_cuda,
                 max_steps,
                 train_batch_size,
                 gradient_accumulation_steps,
                 weight_decay,
                 learning_rate,
                 adam_epsilon,
                 warmup_steps,
                 max_grad_norm,
                 logging_steps,
                 save_steps,
                 eval_batch_size,
                 model_dir,
                 train_dataset=None,
                 dev_dataset=None,
                 test_dataset=None,
                 data_path=None):
        self.max_steps = max_steps
        self.train_dataset = train_dataset
        self.dev_dataset = dev_dataset
        self.test_dataset = test_dataset

        self.train_batch_size = train_batch_size
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.weight_decay = weight_decay
        self.learning_rate = learning_rate
        self.warmup_steps = warmup_steps

        self.adam_epsilon = adam_epsilon
        self.max_grad_norm = max_grad_norm
        self.logging_steps = logging_steps
        self.save_steps = save_steps
        self.eval_batch_size = eval_batch_size

        self.model_dir = model_dir

        self.label_lst = get_label(data_path)
        self.num_labels = len(self.label_lst)

        self.config_class = AutoConfig
        self.model_class = BertForSequenceClassification

        self.config = self.config_class.from_pretrained(
            model_name_or_path,
            num_labels=self.num_labels,
            finetuning_task=task,
            id2label={str(i): label
                      for i, label in enumerate(self.label_lst)},
            label2id={label: i
                      for i, label in enumerate(self.label_lst)})

        self.model = self.model_class.from_pretrained(model_name_or_path,
                                                      config=self.config)

        # GPU or CPU
        self.device = "cuda" if torch.cuda.is_available(
        ) and not no_cuda else "cpu"
        self.model.to(self.device)
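Note that this repo's get_label takes a data path rather than a class index (contrast Example #1). Since self.label_lst is enumerated into the id2label/label2id maps, a plausible reconstruction (an assumption) reads one label name per line:

def get_label(data_path):
    # Hypothetical: one label per line in a plain-text file.
    with open(data_path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f if line.strip()]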