Example #1
 def __init__(self, config, cell_type, event_file, time_file, is_training):
     self.cell_type = cell_type
     self.event_file = event_file
     self.time_file = time_file
     self.num_layers = config.num_layers
     self.hidden_size = config.hidden_size
     self.g_size = config.g_size
     self.filter_output_dim = config.filter_output_dim
     self.filter_size = config.filter_size
     self.batch_size = config.batch_size
     self.num_steps = config.num_steps
     self.is_training = is_training
     self.keep_prob = config.keep_prob
     self.res_rate = config.res_rate
     self.length = config.output_length
     self.vocab_size = config.vocab_size
     self.learning_rate = config.learning_rate
     self.LAMBDA = config.LAMBDA
     self.delta = config.delta
     self.gamma = config.gamma
     self.event_to_id = read_data.build_vocab(self.event_file)
     self.train_data, self.valid_data, self.test_data = read_data.data_split(
         event_file, time_file)
     self.embeddings = tf.get_variable("embedding",
                                       [self.vocab_size, self.hidden_size],
                                       dtype=tf.float32)
     self.build()
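
The constructor above reads every hyperparameter from a `config` object. As a reference, here is a minimal stand-in config that uses exactly the attribute names the constructor accesses; the values are illustrative assumptions, not the original settings.

from types import SimpleNamespace

# Field names match the attributes read in __init__ above; values are placeholders.
config = SimpleNamespace(
    num_layers=1,
    hidden_size=64,
    g_size=64,
    filter_output_dim=64,
    filter_size=3,
    batch_size=32,
    num_steps=20,
    keep_prob=0.9,
    res_rate=0.1,
    output_length=10,
    vocab_size=10000,
    learning_rate=1e-3,
    LAMBDA=1.0,
    delta=1.0,
    gamma=1.0,
)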
Example #2
 def __init__(self, config, cell_type, event_file, time_file, is_training):
     self.alpha = 1.0
     self.cell_type = cell_type
     self.event_file = event_file
     self.time_file = time_file
     self.num_layers = config.num_layers
     self.hidden_size = config.hidden_size
     self.g_size = config.g_size
     self.filter_output_dim = config.filter_output_dim
     self.filter_size = config.filter_size
     self.batch_size = config.batch_size
     self.num_steps = config.num_steps
     self.n_head, self.mh_size = 4, 50
     self.n_g = 3  # config.num_gen
     self.is_training = is_training
     self.keep_prob = config.keep_prob
     self.res_rate = config.res_rate
     self.length = 20  # config.output_length
     self.vocab_size = 52739  # config.vocab_size
     self.learning_rate = config.learning_rate
     self.lr = config.learning_rate
     self.LAMBDA = config.LAMBDA
     self.gamma = config.gamma
     self.train_data, self.valid_data, self.test_data = read_data.data_split(
         event_file, time_file, shuffle=True)
     self.embeddings = tf.get_variable("embedding",
                                       [self.vocab_size, self.hidden_size],
                                       dtype=tf.float32)
     self.sample_t = tf.placeholder(
         tf.float32, [self.batch_size, self.num_steps + self.length])
     self.target_t = tf.placeholder(tf.float32,
                                    [self.batch_size, self.length])
     self.inputs_t = tf.placeholder(tf.float32,
                                    [self.batch_size, self.num_steps])
     self.targets_e = tf.placeholder(tf.int64,
                                     [self.batch_size, self.length])
     self.input_e = tf.placeholder(tf.int64,
                                   [self.batch_size, self.num_steps])
     self.build()
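
This second constructor additionally declares the model's five input placeholders. As a quick sanity check of the expected input shapes, here is a hedged sketch of a feed_dict for an instance `m` of this class; the zero-filled arrays merely stand in for real batches.

import numpy as np

def make_feed(m):
    # Shapes mirror the placeholders declared in __init__ above.
    return {
        m.sample_t:  np.zeros((m.batch_size, m.num_steps + m.length), np.float32),
        m.target_t:  np.zeros((m.batch_size, m.length), np.float32),
        m.inputs_t:  np.zeros((m.batch_size, m.num_steps), np.float32),
        m.targets_e: np.zeros((m.batch_size, m.length), np.int64),
        m.input_e:   np.zeros((m.batch_size, m.num_steps), np.int64),
    }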
Example #3
import datetime
import logging

import tensorflow as tf

import read_data
import Model

FORMAT = "%(asctime)s - [line:%(lineno)s - %(funcName)10s() ] %(message)s"
DATA_TYPE = event_file.split('/')[-1].split('.')[0]
logging.basicConfig(filename='log/{}-{}-{}.log'.format(
    'MM-CPred', DATA_TYPE, str(datetime.datetime.now())),
                    level=logging.INFO,
                    format=FORMAT)

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(FORMAT))
logging.getLogger().addHandler(handler)
logging.info('Start {}'.format(DATA_TYPE))

# Read data
train_data, valid_data, test_data = read_data.data_split(event_file,
                                                         time_file,
                                                         shuffle=True)

# Initialize the model
model = Model.MM_CPred(args)

# Get Config and Create a Session
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.compat.v1.Session(config=config)
sess.run(tf.compat.v1.global_variables_initializer())

# Run training epochs
train_i_e, train_t_e, train_i_t, train_t_t = read_data.data_iterator(
    train_data, args.T, args.len, overlap=True)
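
The first three examples all call read_data.data_split(event_file, time_file). The original implementation is not shown on this page; the following is only a rough sketch of what such a helper might do, assuming one whitespace-separated sequence per line in each file and an 8:1:1 split ratio (both assumptions).

import random

def data_split(event_file, time_file, shuffle=False, ratios=(0.8, 0.1, 0.1)):
    # Pair up event and time sequences line by line.
    with open(event_file) as f_e, open(time_file) as f_t:
        pairs = [(e.split(), t.split()) for e, t in zip(f_e, f_t)]
    if shuffle:
        random.shuffle(pairs)
    n_train = int(ratios[0] * len(pairs))
    n_valid = int(ratios[1] * len(pairs))
    train = pairs[:n_train]
    valid = pairs[n_train:n_train + n_valid]
    test = pairs[n_train + n_valid:]
    return train, valid, test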
Example #4
import math
import matplotlib.pyplot as plt
from tensorflow import keras
# from sklearn.model_selection import train_test_split
from read_data import read_datasets, get_label, data_split, pair_batch_generator
from Lin_models import Lin_Net

# Load data and labels
cover_dir = r'.\cover_dir'
stego_dir = r'.\stego_dir'
X_c = read_datasets(cover_dir)
X_s = read_datasets(stego_dir)

# Data preprocessing
X_train, X_val, X_test = data_split(X_c, X_s)
y_train = get_label(X_train.shape[0], positive=0.5)
y_val = get_label(X_val.shape[0], positive=0.5)
y_test = get_label(X_test.shape[0], positive=0.5)

X_train = X_train.reshape(-1, X_train.shape[1], 1)
X_val = X_val.reshape(-1, X_val.shape[1], 1)
X_test = X_test.reshape(-1, X_test.shape[1], 1)

y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_val = keras.utils.to_categorical(y_val, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)

# Build model
model = Lin_Net(X_train)

# Before fine-tuning the network, you should change the names of the HPF layer and Dense(2) layer.
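
To illustrate that note, here is a minimal, self-contained sketch; the toy architecture, the layer names 'HPF' and 'dense_out', and the weight-file path are all assumptions rather than the actual Lin_Net. Renaming the HPF and Dense(2) layers and then loading the pretrained weights with by_name=True leaves exactly those two layers freshly initialized for fine-tuning.

from tensorflow import keras

def build_net(hpf_name="HPF", out_name="dense_out", input_len=1024):
    # Toy stand-in for Lin_Net: one "HPF" conv layer and one Dense(2) head.
    inputs = keras.Input(shape=(input_len, 1))
    x = keras.layers.Conv1D(4, 5, padding="same", name=hpf_name)(inputs)
    x = keras.layers.GlobalAveragePooling1D()(x)
    outputs = keras.layers.Dense(2, activation="softmax", name=out_name)(x)
    return keras.Model(inputs, outputs)

# Weights saved under the original layer names (path is an assumption).
pretrained = build_net()
pretrained.save_weights("pretrained.h5")

# Rebuild with new names for the HPF and Dense(2) layers; loading by name
# skips the renamed layers, so they start from fresh weights for fine-tuning.
finetune_net = build_net(hpf_name="HPF_ft", out_name="dense_out_ft")
finetune_net.load_weights("pretrained.h5", by_name=True)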