def main(_):
    opt = options.Options()
    print('num_paths: ', opt.num_paths)
    print('path_length: ', opt.path_length)
    print('embedding_size: ', opt.embedding_size)
    print('learning_rate: ', opt.learning_rate)
    print('decay_rate: ', opt.decay_rate)
    print('epochs_to_train: ', opt.epochs_to_train)
    print('num_sequences: ', opt.num_sequences)
    print('num_skips: ', opt.num_skips)
    print('------------------------')
    # Build network, train model, obtain embeddings
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            model = DeepWalk(session, opt)
            print("Build dataset...")
            model.build_dataset()
            print("Build model...")
            train_inputs, train_labels = model.build_model()
            model.train_model(train_inputs, train_labels)
            model.saver.save(session, os.path.join(opt.save_path, 'model.ckpt'))
            # Get the embeddings at each time step
            for i in range(model.num_time):
                single_embeddings = model.single_time_embeddings(i)
                # Run the downstream task on these embeddings
                tasks.run_task(opt.task_name, single_embeddings,
                               model.graph_filename, model.dict_filename)
def train_model(self, train_inputs, train_labels):
    # Train the model for the configured number of epochs
    num_epochs = self._options.epochs_to_train
    self.init.run()
    average_loss = 0
    for epoch in range(num_epochs):
        batch_input, batch_labels = self.generate_batch_list()
        feed_dict = {train_inputs: batch_input, train_labels: batch_labels}
        _, loss_val = self._session.run([self.optimizer, self.loss],
                                        feed_dict=feed_dict)
        average_loss += loss_val
        if epoch % 2000 == 0:
            if epoch > 0:
                average_loss /= 2000
            print('Average loss at step ', epoch, ': ', average_loss)
            average_loss = 0
            # Periodically evaluate the current embeddings on the task
            print('Predicting accuracy at step ', epoch, ": ")
            self.final_embeddings = self.normalized_dynamic_embeddings.eval()
            # self.final_embeddings = self.normalized_embeddings.eval()
            single_embeddings = self.single_time_embeddings()
            tasks.run_task(self._options.task_name, single_embeddings,
                           self.graph_filename, self.dict_filename)
    self.final_embeddings = self.normalized_dynamic_embeddings.eval()
def stop(self):
    """
    Stops VNF instance.
    """
    if self.is_running():
        self._logger.info('Killing WL...')
        # force termination of the VNF and wait for it to terminate;
        # this avoids sporadic reboots of the host
        super(QemuVM, self).kill(signal='-9', sleep=10)
    # remove the shared dir if it exists to avoid issues with file consistency
    if os.path.exists(self._shared_dir):
        tasks.run_task(['rm', '-f', '-r', self._shared_dir],
                       self._logger,
                       'Removing content of shared directory...', True)
    self._running = False
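# For reference: the command-style tasks.run_task(...) used here and in the
# scapy helpers below takes a command list, a logger, a log message, and an
# error-checking flag, and returns (stdout, stderr). A minimal sketch of such
# a helper, assuming a plain subprocess-based implementation (this is not the
# project's actual code):
import subprocess

def run_task_sketch(cmd, logger, msg=None, check_error=False):
    """Run `cmd`, optionally logging `msg`, and return (stdout, stderr)."""
    if msg:
        logger.info(msg)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    stdout, stderr = stdout.decode(), stderr.decode()
    if check_error and proc.returncode != 0:
        logger.error('Command %s failed: %s', ' '.join(cmd), stderr)
    return stdout, stderr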
def singlescale_run(opt, is_reconstruct, save_path):
    print('task_name:', opt.task_name)
    print('num_paths: ', opt.num_paths)
    print('path_length: ', opt.path_length)
    print('embedding_size: ', opt.embedding_size)
    print('learning_rate: ', opt.learning_rate)
    print('decay_rate: ', opt.decay_rate)
    print('epochs_to_train: ', opt.epochs_to_train)
    print('num_sequences: ', opt.num_sequences)
    print('num_skips: ', opt.num_skips)
    print('------------------------')
    save_config(opt, save_path)
    # Build network, train model, obtain embeddings
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            model = TemporalDeepWalk(session, opt)
            print("Build dataset...")
            begin_time_str = '2010/10/01 00:00:00'
            end_time_str = '2010/10/24 23:59:59'
            scale_len_str = '2010/01/4 00:00:00'
            split_options = temporal_graph.GraphSplitOptions(begin_time_str,
                                                             end_time_str,
                                                             scale_len_str)
            model.build_dataset(split_options, is_reconstruct, save_path)
            print("Build model...")
            train_inputs, train_labels = model.build_model()
            model.train_model(train_inputs, train_labels)
            model.saver.save(session, os.path.join(save_path, 'model.ckpt'))
            model.save_dynamic_embeddings(os.path.join(save_path, 'embeddings.txt'))
            # Evaluating the embeddings at each time step shows that accuracy
            # improves with more temporal information:
            # for i in range(model.num_time):
            #     single_embeddings = model.final_embeddings[:, i, :]
            #     tasks.run_task(opt.task_name, single_embeddings, model.filenames)
            # Run the downstream task on the final-time embeddings
            if not is_reconstruct:
                print('begin task relation inferring')
                return tasks.run_task('colleague_relations',
                                      model.final_embeddings[:, -1, :],
                                      model.filenames)
            else:
                print('begin task link reconstruction')
                return tasks.run_task('link_reconstruction',
                                      model.final_embeddings[:, -1, :],
                                      model.filenames)
async def create_task(request):
    app = request.app
    task_id = await store.create_task(app['db'])
    asyncio.create_task(tasks.run_task(app['db'], task_id))
    return json_response(dict(task_id=str(task_id)))
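# The handler above fires tasks.run_task(...) and drops the Task object. The
# event loop keeps only a weak reference to tasks created with
# asyncio.create_task(), so a garbage-collected task can disappear mid-run. A
# hedged variant that holds a strong reference (the `background_tasks` set is
# an addition for illustration, not part of the original app):
background_tasks = set()

async def create_task(request):
    app = request.app
    task_id = await store.create_task(app['db'])
    task = asyncio.create_task(tasks.run_task(app['db'], task_id))
    # keep a reference until the task finishes, then discard it
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)
    return json_response(dict(task_id=str(task_id)))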
def cleanup_scapy_files():
    folder = os.path.join('/tmp', 'scapy')
    if os.path.exists(folder):
        for file in os.listdir(folder):
            fi = os.path.join(folder, file)
            stdout, stderr = tasks.run_task(['rm', fi], _LOGGER,
                                            'Remove File', True)
def main(_):
    opt = options.Options()
    # Initialize the node set and the initial (random) state embeddings
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            embedding_size = opt.embedding_size
            init_model = SingleTimeIterDeepWalk(session, opt)
            nodes, _ = init_model.build_dataset([], [], 0, 31)
            edges = []
            states = np.random.rand(init_model.graph_size,
                                    embedding_size).astype(np.float32)
    time_interval = 3
    end_time = 25
    for i in range(0, end_time, time_interval):
        print('Model ', i, ':')
        # Build network, train model, obtain embeddings for this time window
        with tf.Graph().as_default(), tf.Session() as session:
            with tf.device('/cpu:0'):
                model = SingleTimeIterDeepWalk(session, opt)
                _, edges = model.build_dataset(nodes, edges, i, time_interval)
                train_inputs, train_labels = model.build_model(states)
                model.train_model(train_inputs, train_labels)
                model.saver.save(session, os.path.join(opt.save_path, 'model.ckpt'))
                # Run the downstream task on the embeddings
                if opt.task_name == 'link prediction':
                    tasks.run_task(opt.task_name, states, init_model.filenames)
                else:
                    tasks.run_task(opt.task_name, model.final_embeddings,
                                   init_model.filenames)
                # Carry the learned embeddings over as the next window's state
                states = model.final_embeddings
        print('--------------')
def normal_run(opt, save_path):
    print('num_paths: ', opt.num_paths)
    print('path_length: ', opt.path_length)
    print('embedding_size: ', opt.embedding_size)
    print('learning_rate: ', opt.learning_rate)
    print('decay_rate: ', opt.decay_rate)
    print('epochs_to_train: ', opt.epochs_to_train)
    print('num_sequences: ', opt.num_sequences)
    print('num_skips: ', opt.num_skips)
    print('------------------------')
    save_config(opt, save_path)
    # Build network, train model, obtain embeddings
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            model = DeepWalk(session, opt)
            begin_time_str = begin_time_list[opt.begin_time]
            stop_time_str = '2010/10/24 23:59:59'
            split_options = graph.GraphSplitOptions(begin_time_str, stop_time_str)
            model.build_dataset(split_options, False, save_path)
            train_inputs, train_labels = model.build_model()
            model.train_model(train_inputs, train_labels)
            model.saver.save(session, os.path.join(save_path, 'model.ckpt'))
            model.save_embeddings(os.path.join(save_path, 'embeddings.txt'))
            # Run the downstream task on the embeddings
            print('begin task relation inferring')
            inferring_task = tasks.run_task('colleague_relations',
                                            model.final_embeddings,
                                            model.filenames)
            # print('begin task potential link prediction')
            # prediction2_task = tasks.run_task('potential_link_prediction',
            #                                   model.final_embeddings,
            #                                   model.filenames)
            return inferring_task
def create_scapy_out(fname):
    command = S.getValue('COMMAND')
    stdout, stderr = tasks.run_task(
        [command, '-json', fname, '-o', '/tmp', '--scapy'],
        _LOGGER, 'Create Dir', True)
def test_run_task_class():
    assert run_task(TEST_CLASS_NAME, {"operands": [3, 2, 8]}) == 48, "Bad answer!"
def test_run_decorated_func():
    assert run_task(TEST_FUNC_NAME, {
        "msg": "hello",
        "count": 2
    }) == 'hello\nhello', "Bad answer!"
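# The two tests above imply a run_task(name, params) dispatcher that resolves
# a registered class or decorated function by name and calls it with the params
# as keyword arguments. A minimal sketch consistent with those tests; the
# registry, the @task decorator, and the example task names are assumptions,
# not the library's actual API:
import math

TASK_REGISTRY = {}

def task(name):
    """Register a callable (or a class with a run() method) under `name`."""
    def wrap(obj):
        TASK_REGISTRY[name] = obj
        return obj
    return wrap

def run_task(name, params):
    obj = TASK_REGISTRY[name]
    if isinstance(obj, type):             # class-based task: instantiate, then run
        return obj(**params).run()
    return obj(**params)                  # function-based task: call directly

@task('multiply')                         # would satisfy test_run_task_class
class Multiply:
    def __init__(self, operands):
        self.operands = operands
    def run(self):
        return math.prod(self.operands)   # 3 * 2 * 8 == 48

@task('repeat')                           # would satisfy test_run_decorated_func
def repeat(msg, count):
    return '\n'.join([msg] * count)       # 'hello\nhello' for count=2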
def multiscale_run(opt, is_reconstruct, save_path):
    print('task_name:', opt.task_name)
    print('num_paths: ', opt.num_paths)
    print('path_length: ', opt.path_length)
    print('embedding_size: ', opt.embedding_size)
    print('learning_rate: ', opt.learning_rate)
    print('decay_rate: ', opt.decay_rate)
    print('epochs_to_train: ', opt.epochs_to_train)
    print('num_sequences: ', opt.num_sequences)
    print('num_skips: ', opt.num_skips)
    print('------------------------')
    save_config(opt, save_path)
    g1 = tf.Graph()
    with g1.as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            # HOUR scale
            model1 = TemporalDeepWalk(session, opt)
            print("Build dataset...")
            begin_time_str = '2010/10/24 00:00:00'
            end_time_str = '2010/10/24 23:59:59'
            scale_len_str = '2010/01/01 03:00:00'
            split_options = temporal_graph.GraphSplitOptions(begin_time_str,
                                                             end_time_str,
                                                             scale_len_str)
            model1.build_dataset(split_options, is_reconstruct, save_path)
            print("Build model...")
            train_inputs, train_labels = model1.build_model()
            model1.train_model(train_inputs, train_labels)
            model1.saver.save(session, os.path.join(save_path, 'model1.ckpt'))
            # model1.save_dynamic_embeddings(os.path.join(save_path, 'embeddings1.txt'))
    g2 = tf.Graph()
    with g2.as_default(), tf.Session() as session:
        with tf.device('/cpu:0'):
            # DAY scale
            model2 = TemporalDeepWalk(session, opt)
            print("Build dataset...")
            begin_time_str = '2010/10/01 00:00:00'
            end_time_str = '2010/10/24 23:59:59'
            scale_len_str = '2010/01/4 00:00:00'
            split_options = temporal_graph.GraphSplitOptions(begin_time_str,
                                                             end_time_str,
                                                             scale_len_str)
            model2.build_dataset(split_options, is_reconstruct, save_path)
            print("Build model...")
            train_inputs, train_labels = model2.build_model()
            model2.train_model(train_inputs, train_labels)
            model2.saver.save(session, os.path.join(save_path, 'model2.ckpt'))
            # model2.save_embeddings(filename='temp/embeddings2.txt')
    filenames = model2.filenames
    # Concatenate the final-time embeddings from both scales
    embeddings = np.concatenate((model1.final_embeddings[:, -1, :],
                                 model2.final_embeddings[:, -1, :]), axis=1)
    save_embeddings_for_multiscale((model1.final_embeddings[:, -1, :],
                                    model2.final_embeddings[:, -1, :]),
                                   os.path.join(save_path, 'embeddings.txt'))
    # Run the downstream task on the combined embeddings
    if not is_reconstruct:
        print('begin task relation inferring')
        return tasks.run_task('colleague_relations', embeddings, filenames)
    else:
        print('begin task link reconstruction')
        return tasks.run_task('link_reconstruction', embeddings, filenames)
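# save_embeddings_for_multiscale(...) above is a project helper that receives a
# tuple of per-scale embedding matrices and a target path. A plausible sketch,
# assuming it writes one line per node with the per-scale vectors concatenated;
# the exact on-disk format is an assumption:
import numpy as np

def save_embeddings_for_multiscale(embeddings_per_scale, filename):
    """Concatenate per-scale embeddings along the feature axis, save as text."""
    combined = np.concatenate(embeddings_per_scale, axis=1)
    with open(filename, 'w') as f:
        f.write('%d %d\n' % combined.shape)   # header: num_nodes, total_dim
        for node_id, vec in enumerate(combined):
            f.write('%d %s\n' % (node_id, ' '.join('%f' % x for x in vec)))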