Example #1
    def load_model_and_generate(self, model_name='model7_laf', epochs=10):
        dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
        dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(
            epochs) + '/'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        model = load_model(
            './model_checkpoint/best_models/'
            'model7_laf_date_2018-06-19_12-23-39_epoch_30_val_loss_0.8395.h5',
            compile=False)
        optimizer = Adam(lr=0.0001)  # Reduce from 0.001 to 0.0001 for model_10
        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        seq = self.generate_and_fuzz_new_samples(model=model,
                                                 model_name=model_name,
                                                 epochs=epochs,
                                                 current_epoch=10,
                                                 dir_name=dir_name)

        list_of_obj = preprocess.get_list_of_object(seq=seq, is_sort=False)
        return list_of_obj
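A minimal usage sketch for the method above. The class name DeepFuzzer and its no-argument constructor are assumptions for illustration only; just load_model_and_generate itself comes from the example.

fuzzer = DeepFuzzer()  # hypothetical owner class of load_model_and_generate()
new_objects = fuzzer.load_model_and_generate(model_name='model7_laf', epochs=10)
print('generated objects:', len(new_objects))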
Example #2
    def __init__(self,
                 host_id=None,
                 object_file_path=iu_config['baseline_object_path'],
                 stream_directory_path=iu_config['stream_directory_path']):
        """

        :param host_id: Name of host file without postfix, e.g. host1_max, host2_min or host3_avg
        :param object_file_path: See iu_config, new_objects_path
        :param stream_directory_path: See iu_config, stream_directory_path
        """
        self.host_id = host_id

        self.object_file_path = object_file_path
        self.obj_list = preprocess.get_list_of_object(seq=preprocess.load_from_file(self.object_file_path),
                                                      is_sort=False)

        self.stream_directory_path = '../' + stream_directory_path
        self.stream_filename_list = os.listdir(self.stream_directory_path)

        # Create a new directory each time the program runs and new test data should be generated
        dt = datetime.datetime.now().strftime(self.host_id + '_date_%Y-%m-%d_%H-%M-%S')
        self.storage_dir_name = iu_config['new_pdfs_directory'] + self.host_id + '/' + dt + '/'
        if not os.path.exists(self.storage_dir_name):
            os.makedirs(self.storage_dir_name)
            print('New storage directory built.')

        self.obj_getter = self.obj_generator(self.obj_list)
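The constructor ends by creating self.obj_getter = self.obj_generator(self.obj_list), but obj_generator itself is not shown. A plausible sketch, assuming it simply cycles over the loaded objects so that next(self.obj_getter) always yields another object to attach:

    def obj_generator(self, obj_list):
        """Hypothetical sketch: yield objects from obj_list, restarting when the list is exhausted."""
        while True:
            for obj in obj_list:
                yield obj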
Example #3
def get_one_object():
    """ provide one pdf data object whether an existing object in corpus or
    an online new generated object from learnt model
    this function is not complete yet!
    """
    object_file_path = '../trainset/pdf_object_trainset_100_to_500_percent33.txt'

    seq = poc.load_from_file(object_file_path)
    obj_list = poc.get_list_of_object(seq, is_sort=False)
    random_object_index = random.randint(50, len(obj_list) - 1)
    obj = obj_list[random_object_index]
    return obj
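Several examples rely on poc.load_from_file and poc.get_list_of_object (also imported as preprocess) without showing them. A rough sketch of what such helpers could look like, assuming objects are delimited by the usual "obj ... endobj" pattern; the real preprocess module may differ.

import re

def load_from_file(path):
    # Hypothetical helper: read the whole training file as text.
    with open(path, 'r', errors='ignore') as f:
        return f.read()

def get_list_of_object(seq, is_sort=False):
    # Hypothetical helper: split a text stream into 'obj ... endobj' chunks.
    objects = re.findall(r'obj\b.*?endobj', seq, flags=re.DOTALL)
    return sorted(objects, key=len) if is_sort else objects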
Example #4
    def __get_objects_sequence(self):
        seq = ''
        for filename in os.listdir(iu_config['new_objects_path']):
            try:
                seq += preprocess.load_from_file(
                    iu_config['new_objects_path'] + filename)
            except Exception as e:
                print('Extracting failed from %s:' % filename, file=sys.stderr)
                print(str(e), file=sys.stderr)
        obj_list = preprocess.get_list_of_object(seq=seq, is_sort=False)
        print('obj_list len', len(obj_list))
        print(obj_list)
        # for o in obj_list:
        #     print(o, '\n', '#' * 50)
        # input()
        return obj_list
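The incremental-update examples read their paths from an iu_config dictionary whose contents are never shown. Based only on the keys used above, it presumably looks something like the following; the paths are placeholders, not the project's actual values.

iu_config = {
    'baseline_object_path': './trainset/baseline_objects.txt',   # assumed path
    'new_objects_path': './generated_results/new_objects/',      # assumed path
    'stream_directory_path': 'streams/',                          # assumed path
    'new_pdfs_directory': './generated_results/new_pdfs/',        # assumed path
}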
Example #5
def attach_new_object():
    """ incremental update pdf file """
    host_names = ['host1', 'host2', 'host3']
    with open(host_directory + host_names[0] + '.pdf', 'br') as f:
        data = f.read()
        print(len(data))

    # find last trailer in a pdf file
    trailer_index = 0
    while data.find(b'trailer', trailer_index + 7) != -1:
        trailer_index = data.find(b'trailer', trailer_index + 7)
    print('trailer_index', trailer_index)

    trailer_index_dic_endof = data.find(b'>>', trailer_index)
    print('trailer_index_dic_endof', trailer_index_dic_endof)

    trailer_content = data[trailer_index:trailer_index_dic_endof + 2]
    print('trailer_content', trailer_content)

    # find last startxref offset in a pdf file
    startxref_index = trailer_index
    while data.find(b'startxref', startxref_index + 9) != -1:
        startxref_index = data.find(b'startxref', startxref_index + 9)
    # print('index ===', index_startxref)
    index_eof = data.find(b'%%EOF', startxref_index)
    # print('index 2===', index_eof)
    # skip the 'startxref' keyword and the newline that follows it
    startxref_index += 9
    if data[startxref_index:startxref_index + 1] in (b'\n', b'\r'):
        startxref_index += 1
    if data[index_eof - 1:index_eof] in (b'\n', b'\r'):
        index_eof -= 1
    startxref_offset = int(data[startxref_index:index_eof])
    print('startxref_offset', startxref_offset)

    # print(type(trailer_content))
    trailer_content_new = trailer_content[:-2] + b'   /Prev ' \
                          + bytes(str(startxref_offset), 'ascii') + b' \n>>'
    print('trailer_content_new', trailer_content_new)

    # print(bytes(str(startxref_offset), 'ascii'))

    # load the pdf objects from file
    seq = poc.load_from_file(
        host_directory + 'gen_objs_20180221_142612_epochs10_div1.5_step1.txt')
    obj_list = poc.get_list_of_object(seq)
    random_object_index = random.randint(0, len(obj_list) - 1)
    obj = obj_list[random_object_index]

    last_object_id = str(get_last_object_id(host_names[0]))

    random_rewrite_object = str(random.randint(1, int(last_object_id)))

    print('len object', len(obj))
    startxref_offset_new = len(data) + 1 + len(
        random_rewrite_object) + 3 + len(obj)  # if we attach just one obj
    print('startxref_offset_new', startxref_offset_new)

    attach_content = bytes(str(random_rewrite_object + ' 0 ' + obj + '\nxref\n0 1\n0000000000 65535 f\n' +\
                               random_rewrite_object + ' 1\n' + str(len(data)).zfill(10) + ' 00000 n\n'), 'ascii') +\
                     trailer_content_new + b'\nstartxref\n' + \
                     bytes(str(startxref_offset_new), 'ascii') + b'\n%%EOF\n'

    print('attach_content\n', attach_content)

    new_pdf_file = data + attach_content
    with open(host_directory + host_names[0] + 'iu_auto7.pdf', 'bw') as f:
        f.write(new_pdf_file)
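get_last_object_id(host_name) is called above but not defined in this example. One way it could work, sketched here purely as an assumption, is to read the host PDF and return the largest object number that appears before an "obj" keyword (reading the trailer's /Size entry would be an alternative).

import re

def get_last_object_id(host_name):
    # Hypothetical sketch: return the highest object number used in the host PDF.
    with open(host_directory + host_name + '.pdf', 'br') as f:
        data = f.read()
    object_ids = [int(m.group(1)) for m in re.finditer(rb'(\d+)\s+\d+\s+obj', data)]
    return max(object_ids) if object_ids else 0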
Example #6
    def generate_and_fuzz_new_samples(self,
                                      model=None,
                                      model_name='model_1',
                                      epochs=1,
                                      current_epoch=1,
                                      dir_name=None):
        """
        sampling the model and generate new object
        :param model: The model which is training.
        :param model_name: Name of model (base on hyperparameters config in deep_model.py file) e.g. [model_1, model_2,
        ...]
        :param epochs: Number of total epochs of training, e.g. 10,20,30,40,50 or 60
        :param current_epoch: Number of current epoch
        :param dir_name: root directory for this running.
        :return: Nothing
        """

        # End time of current epoch
        dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
        dir_name = dir_name + 'epoch_' + str(current_epoch) + dt + '/'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        # Fuzzing hyper-parameters

        # diversities = [i * 0.10 for i in range(1, 20, 2)]
        # diversities = [0.2, 0.5, 1.0, 1.2, 1.5, 1.8]
        # diversities = [0.5, 1.0, 1.5]  # for sou and for mou
        diversities = [1.0]

        generated_obj_total = 1100  # [5, 10, 100, 1000, 3000] {1000-1100 for sou and 3000-3100 for mou}
        generated_obj_with_same_prefix = 1  # [1, 5, 10, 20, 40] {10 for sou and 20 for mou}
        generated_obj_max_allowed_len = 400  # Max allowed length of a generated object
        exclude_from_fuzzing_set = {'s', 't', 'r', 'e', 'a', 'm'}

        # Learn and fuzz paper hyper-parameters
        t_fuzz = 0.9  # For comparison with p_fuzz, where p_fuzz is a random number (fuzz if p_fuzz > t_fuzz)
        p_t = 0.9  # 0.9 or more for format fuzzing; 0.5 or less for data fuzzing. Currently format fuzzing.

        # End of fuzzing hyper-parameters

        testset_objects_list = preprocess.get_list_of_object(self.text_test)
        testset_object_gt_maxlen_list = []
        for obj in testset_objects_list:
            if len(obj) > self.maxlen + len(' endobj'):
                testset_object_gt_maxlen_list.append(obj)
        print('len filtered test-set: ', len(testset_object_gt_maxlen_list))
        generated_total = ''
        for diversity in diversities:
            generated_total = ''
            for q in range(
                    round(generated_obj_total /
                          generated_obj_with_same_prefix)):

                obj_index = random.randint(
                    0,
                    len(testset_object_gt_maxlen_list) - 1)
                # obj_index = 0
                generated_obj_counter = 0
                generated_obj_len = 0
                generated = ''
                stop_condition = False
                endobj_attach_manually = False
                # print()
                print('-- Diversity:', diversity)

                obj_prefix = str(
                    testset_object_gt_maxlen_list[obj_index])[0:self.maxlen]
                generated += obj_prefix
                # prob_vals = '1 ' * self.maxlen
                # learnt_grammar = obj_prefix

                # print('--- Generating ts_text with seed:\n "' + obj_prefix + '"')
                # sys.stdout.write(generated)

                if generated.endswith('endobj'):
                    generated_obj_counter += 1

                if generated_obj_counter > generated_obj_with_same_prefix:
                    stop_condition = True

                while not stop_condition:
                    x_pred = np.zeros((1, self.maxlen, len(self.chars)))
                    for t, char in enumerate(obj_prefix):
                        x_pred[0, t, self.char_indices[char]] = 1.

                    preds = model.predict(x_pred, verbose=0)[0]
                    next_index, prob, preds2 = self.sample(preds, diversity)
                    next_char = self.indices_char[next_index]
                    next_char_for_prefix = next_char

                    ###### Fuzzing section; we don't need it yet!
                    # if next_char not in exclude_from_fuzzing_set:
                    #     p_fuzz = random.random()
                    #     if p_fuzz > t_fuzz and preds2[next_index] > p_t:
                    #         next_index = np.argmin(preds2)
                    #         print('((Fuzz!))')
                    #     next_char = self.indices_char[next_index]
                    ###### End of fuzzing section

                    # print()
                    # print(preds2)
                    # print(np.argmax(preds))
                    # print(preds[np.argmax(preds)])

                    # print(prob)
                    # print(np.argmax(prob))
                    # print('====>',next_index)
                    # print(prob[0, next_index])

                    # prob_vals += str(preds2[next_index]) + '\n'
                    # if preds2[next_index] > 0.9980:
                    #     learnt_grammar += next_char
                    # else:
                    #     learnt_grammar += '.'
                    # input()

                    obj_prefix = obj_prefix[1:] + next_char_for_prefix
                    generated += next_char_for_prefix  # next_char
                    generated_obj_len += 1

                    if generated.endswith('endobj'):
                        generated_obj_counter += 1
                        generated_obj_len = 0
                    elif generated_obj_len > generated_obj_max_allowed_len:
                        # Attach '\nendobj\n' manually, and reset obj_prefix
                        generated += '\nendobj\n'
                        generated_obj_counter += 1
                        generated_obj_len = 0
                        endobj_attach_manually = True

                    if generated_obj_counter >= generated_obj_with_same_prefix:  # Fix: Change > to >= (13970315)
                        stop_condition = True
                    elif endobj_attach_manually:
                        # Reset prefix:
                        # We need to adjust obj_prefix because we manually changed the generated object.
                        # obj_prefix = obj_prefix[len('\nendobj\n'):] + '\nendobj\n'
                        # Instead of modifying obj_prefix we can reset the prefix whenever 'endobj' was not
                        # generated automatically. That seems to be the better option, so we do this:
                        # obj_index = random.randint(0, len(testset_object_gt_maxlen_list) - 1)
                        obj_index = 0
                        obj_prefix = str(
                            testset_object_gt_maxlen_list[obj_index])[0:self.maxlen]
                        generated += obj_prefix
                        endobj_attach_manually = False

                    # sys.stdout.write(next_char)
                    # sys.stdout.flush()
                    # print()
                generated_total += generated + '\n'
            # Save the generated result to a file

            file_name = model_name \
                        + '_diversity_' + repr(diversity) \
                        + '_epochs_' + repr(epochs) \
                        + '_step_' + repr(self.step) \
                        + '.txt'
            preprocess.save_to_file(dir_name + file_name, generated_total)
            # preprocess.save_to_file(dir_name + file_name + 'probabilities.txt', prob_vals)
            # preprocess.save_to_file(dir_name + file_name + 'learntgrammar.txt',learnt_grammar)
            print('Diversity %s save to file successfully.' % diversity)

        print('End of generation method.')
        print('Starting new epoch ...')
        return generated_total
def train():
    # trainset_path = './trainset/pdfobjs.txt'
    trainset_path = './trainset/pdf_object_trainset_100_to_500_percent01.txt'
    text = poc.load_from_file(trainset_path)
    print('corpus length:', len(text))

    chars = sorted(list(set(text)))
    print('Total chars:', len(chars))
    # print(chars)

    # Vectorization
    print('Building dictionary index ...')
    char_indices = dict((c, i) for i, c in enumerate(chars))
    # print(char_indices)
    indices_char = dict((i, c) for i, c in enumerate(chars))
    # print(indices_char)

    # cut the text in semi-redundant sequences of maxlen characters
    maxlen = 50  # Good idea: use ave_object_len to determine this hyper-parameter
    step = 1  # should set to 1 for best result
    epochs = 10  # number of epochs for training
    sentences = []  # list of all sentence as input
    next_chars = []  # list of all next chars as labels
    for i in range(0, len(text) - maxlen, step):  # slide a maxlen-char window over the text
        sentences.append(text[i:i + maxlen])
        # print(sentences)
        next_chars.append(text[i + maxlen])
        # print(next_chars)

    print('semi sequences:', len(sentences))

    print('One-Hot vectorization...')
    x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)  # input x
    y = np.zeros((len(sentences), len(chars)), dtype=bool)  # output label y
    for i, sentence in enumerate(sentences):
        for t, char in enumerate(sentence):
            x[i, t, char_indices[char]] = 1
        y[i, char_indices[next_chars[i]]] = 1

    # build the model: two stacked LSTM layers # we may need to make it deeper
    print('Build model...')
    model = Sequential()
    # model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    model.add(
        LSTM(128, input_shape=(maxlen, len(chars)), return_sequences=True))
    # model.add(LSTM(128, input_shape=(maxlen, len(chars)),
    #                activation='relu', return_sequences=True, dropout=0.2))
    model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    # model.add(LSTM(128, activation='relu', dropout=0.2))
    model.add(Dense(len(chars)))
    model.add(Activation('softmax'))

    optimizer = RMSprop(lr=0.01)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    input()  # pause here; press Enter to start training
    # sys.exit()

    model.fit(x, y, batch_size=128, epochs=epochs, validation_split=0.2)
    save(model, epochs)

    # del model
    # model = load_model('./modelh5/lstm_text_generation_pdf_objs_1_20180214_235713_epochs10.h5')
    """ sampling the model and generate new object """
    diversities = [0.2, 0.5, 1.0, 1.2, 1.5, 1.8]
    # diversities = [0.1, 0.2, 0.3, 0.5, 0.7, 1, 1.2, 1.5, 1.7, 2]
    generated_obj_max_number = 5
    generated_obj_max_allowed_len = 500
    t_fuzz = 0.9
    p_t = 0.9  # 0.9 for format fuzzing and 0.5 or less for data fuzzing. Currently format fuzzing.

    list_of_objects = poc.get_list_of_object(text)
    list_of_objects_with_maxlen = []
    for o in list_of_objects:
        if len(o) > maxlen:
            list_of_objects_with_maxlen.append(o)

    for diversity in diversities:
        obj_index = random.randint(0, len(list_of_objects_with_maxlen) - 1)
        generated_obj_counter = 0
        generated_obj_len_index = 0

        stop_condition = False
        print()
        print('-- Diversity:', diversity)

        # generated = ''
    obj_prefix = str(list_of_objects_with_maxlen[obj_index])[0:maxlen]  # seed prefix of length maxlen

        generated = obj_prefix
        prob_vals = '100\n' * maxlen
        learnt_grammar = obj_prefix

        print('--- Generating text with seed:\n "' + obj_prefix + '"')
        sys.stdout.write(generated)

        if generated.endswith('endobj'):
            generated_obj_counter += 1

        if generated_obj_counter > generated_obj_max_number:
            stop_condition = True

        while not stop_condition:
            x_pred = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(obj_prefix):
                x_pred[0, t, char_indices[char]] = 1.

            preds = model.predict(x_pred, verbose=0)[0]
            next_index, prob, preds2 = sample(preds, diversity)
            p_fuzz = random.random()
            if p_fuzz > t_fuzz and preds2[next_index] > p_t:
                next_index = np.argmin(preds2)
                print('FUZZ DONE!')

            next_char = indices_char[next_index]

            # print()
            # print(preds2)
            # print(np.argmax(preds))
            # print(preds[np.argmax(preds)])

            # print(prob)
            # print(np.argmax(prob))
            # print('====>',next_index)
            # print(prob[0, next_index])

            # prob_vals += str(preds2[next_index]) + '\n'
            # if preds2[next_index] > 0.9980:
            #     learnt_grammar += next_char
            # else:
            #     learnt_grammar += '.'
            # input()

            obj_prefix = obj_prefix[1:] + next_char
            generated += next_char
            generated_obj_len_index += 1

            if generated.endswith('endobj'):
                generated_obj_counter += 1
                generated_obj_len_index = 0
            elif generated_obj_len_index > generated_obj_max_allowed_len:
                generated += '\nendobj\n'
                generated_obj_counter += 1
                generated_obj_len_index = 0

            if generated_obj_counter > generated_obj_max_number:
                stop_condition = True

            sys.stdout.write(next_char)
            sys.stdout.flush()

        # save generated text to file inside program
        dt = datetime.datetime.now().strftime('_%Y%m%d_%H%M%S_')
        dir_name = './generated_results/pdfobjs_new/'
        file_name = 'gen_objs' + dt + 'epochs' + repr(epochs) + '_div' \
                    + repr(diversity) + '_step' + repr(step) + '.txt'
        poc.save_to_file(dir_name + file_name, generated)
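Both generation loops call a sample(preds, diversity) helper that is not shown in these examples. Judging by the call sites, it returns the chosen index, the raw multinomial draw, and the temperature-reweighted distribution. The sketch below follows the standard Keras character-level text-generation recipe and matches how the three return values are unpacked above; the exact implementation in the source may differ.

def sample(preds, temperature=1.0):
    # Temperature (diversity) based sampling over the model's softmax output.
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-12) / temperature   # re-weight by temperature (epsilon avoids log(0))
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)         # renormalise to a probability distribution
    probas = np.random.multinomial(1, preds, 1)   # draw one sample from the distribution
    return np.argmax(probas), probas, preds       # next_index, prob, preds2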