async def test_2way_protocol(unused_tcp_port, no_tid_check, postgres_db, database_name):
    configure(unused_tcp_port, database_name, postgres_db.port)

    rs = Server()
    server = SessionSpy()
    rs.get_slice(SLICE_SESSION_MANAGER).add_listener(server)
    rs.add_slice(server)
    await rs.start()

    agent = Agent("agent")
    await agent.add_end_point_name("agent")
    agent.set_environment(uuid.uuid4())
    await agent.start()

    await retry_limited(lambda: len(server.get_sessions()) == 1, 10)
    assert len(server.get_sessions()) == 1
    await assert_agent_counter(agent, 1, 0)

    client = protocol.Client("client")
    status = await client.get_status_x(str(agent.environment))
    assert status.code == 200
    assert "agents" in status.result
    assert len(status.result["agents"]) == 1
    assert status.result["agents"][0]["status"] == "ok"

    await server.stop()
    await rs.stop()
    await agent.stop()
    await assert_agent_counter(agent, 1, 0)
async def test_agent_timeout(unused_tcp_port, no_tid_check, async_finalizer, postgres_db, database_name):
    from inmanta.config import Config

    configure(unused_tcp_port, database_name, postgres_db.port)
    Config.set("server", "agent-timeout", "1")

    rs = Server()
    server = SessionSpy()
    rs.get_slice(SLICE_SESSION_MANAGER).add_listener(server)
    rs.add_slice(server)
    await rs.start()
    async_finalizer(rs.stop)

    env = uuid.uuid4()

    # agent 1
    agent = Agent("agent")
    await agent.add_end_point_name("agent")
    agent.set_environment(env)
    await agent.start()
    async_finalizer(agent.stop)

    # wait till up
    await retry_limited(lambda: len(server.get_sessions()) == 1, timeout=10)
    assert len(server.get_sessions()) == 1
    await assert_agent_counter(agent, 1, 0)

    # agent 2
    agent2 = Agent("agent")
    await agent2.add_end_point_name("agent")
    agent2.set_environment(env)
    await agent2.start()
    async_finalizer(agent2.stop)

    # wait till up
    await retry_limited(lambda: len(server.get_sessions()) == 2, timeout=10)
    assert len(server.get_sessions()) == 2
    await assert_agent_counter(agent, 1, 0)
    await assert_agent_counter(agent2, 1, 0)

    # see if it stays up
    await check_sessions(server.get_sessions())
    await sleep(1.1)
    assert len(server.get_sessions()) == 2
    await check_sessions(server.get_sessions())

    # take it down
    await agent2.stop()

    # Timeout=2
    # -> 1 sec: wait for agent-timeout
    # -> 1 sec: wait until session bookkeeping is updated
    await retry_limited(lambda: len(server.get_sessions()) == 1, timeout=2)
    print(server.get_sessions())
    await check_sessions(server.get_sessions())
    assert server.expires == 1
    await assert_agent_counter(agent, 1, 0)
    await assert_agent_counter(agent2, 1, 0)
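# The tests above lean on a retry_limited helper to wait for session state.
# Below is a minimal sketch of what such a helper could look like, assuming it
# polls an (optionally async) zero-argument condition until it holds or the
# timeout expires; the project's actual implementation may differ.
import asyncio
import inspect

async def retry_limited(fun, timeout, interval=0.1):
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while True:
        result = fun()
        if inspect.isawaitable(result):
            result = await result
        if result:
            return
        if loop.time() >= deadline:
            raise TimeoutError("condition did not become true within %s seconds" % timeout)
        await asyncio.sleep(interval)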
def install(): fetch("http://ftp.gnome.org/pub/gnome/sources/json-glib/0.16/json-glib-%(json-glib)s.tar.xz") extract("json-glib-%(json-glib)s.tar.xz") configure( "json-glib-%(json-glib)s", ["--prefix=%s" % env.prefix, "--disable-gcov", "--disable-introspection", "CC=clang"] ) make("json-glib-%(json-glib)s") make("json-glib-%(json-glib)s", "install")
def install():
    fetch_git('https://github.com/rafaelmartins/bluster.git', 'bluster')
    autogen('bluster')
    configure('bluster',
              ['--prefix=%s' % env.prefix,
               'PKG_CONFIG_PATH=%s/lib/pkgconfig' % env.prefix,
               'PATH=%s/bin:$PATH' % env.prefix,
               'LDFLAGS="-Wl,--rpath -Wl,%s/lib -L%s/lib"' % (env.prefix, env.prefix),
               'CFLAGS="-I%s/include"' % env.prefix,
               'CC=clang'])
    make('bluster')
    make('bluster', 'install')
def install():
    fetch('http://www.pell.portland.or.us/~orc/Code/discount/discount-%(discount)s.tar.bz2')
    extract('discount-%(discount)s.tar.bz2')
    configure('discount-%(discount)s',
              ['--prefix=%s' % env.prefix,
               '--libdir=%s/lib' % env.prefix,
               '--mandir=%s/man' % env.prefix,
               '--shared',
               '--enable-all-features'],
              'configure.sh')
    run('sed -i .bkp -e "/ldconfig/d" %s/%s/librarian.sh'
        % (env.build, 'discount-%(discount)s' % env.versions))
    make('discount-%(discount)s')
    make('discount-%(discount)s', 'install')
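# The install() recipes use a small build DSL (fetch/extract/configure/make/
# run). A rough sketch of how those helpers could be implemented follows; the
# env object, its prefix/build/versions attributes, the illustrative version
# number, and the "%(pkg)s" interpolation are assumptions inferred from how
# the recipes call them, not the project's actual implementation (fetch_git
# and autogen are omitted).
import os
import subprocess
import urllib.request

class _Env:
    def __init__(self, prefix, build, versions):
        self.prefix = prefix      # install prefix, e.g. ~/.local
        self.build = build        # scratch directory for downloads and builds
        self.versions = versions  # package name -> version string

env = _Env(prefix=os.path.expanduser('~/.local'),
           build='/tmp/build',
           versions={'discount': '2.1.8'})  # illustrative version only

def _expand(name):
    # Interpolate "%(pkg)s" placeholders against the configured version table.
    return name % env.versions

def fetch(url):
    url = _expand(url)
    target = os.path.join(env.build, os.path.basename(url))
    if not os.path.exists(target):
        urllib.request.urlretrieve(url, target)

def extract(tarball):
    subprocess.check_call(['tar', 'xf', _expand(tarball)], cwd=env.build)

def configure(srcdir, args, script='configure'):
    subprocess.check_call(['./' + script] + args,
                          cwd=os.path.join(env.build, _expand(srcdir)))

def make(srcdir, *targets):
    subprocess.check_call(['make'] + list(targets),
                          cwd=os.path.join(env.build, _expand(srcdir)))

def run(command):
    subprocess.check_call(command, shell=True, cwd=env.build)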
async def test_server_timeout(unused_tcp_port, no_tid_check, async_finalizer, postgres_db, database_name):
    from inmanta.config import Config

    configure(unused_tcp_port, database_name, postgres_db.port)
    Config.set("server", "agent-timeout", "1")

    async def start_server():
        rs = Server()
        server = SessionSpy()
        rs.get_slice(SLICE_SESSION_MANAGER).add_listener(server)
        rs.add_slice(server)
        await rs.start()
        async_finalizer(rs.stop)
        return server, rs

    server, rs = await start_server()

    env = uuid.uuid4()

    # agent 1
    agent = Agent("agent")
    await agent.add_end_point_name("agent")
    agent.set_environment(env)
    await agent.start()
    async_finalizer(agent.stop)

    # wait till up
    await retry_limited(lambda: len(server.get_sessions()) == 1, 10)
    assert len(server.get_sessions()) == 1
    await assert_agent_counter(agent, 1, 0)

    await rs.stop()

    # timeout
    await sleep(1.1)

    # check agent disconnected
    await assert_agent_counter(agent, 1, 1)

    # recover
    server, rs = await start_server()
    await retry_limited(lambda: len(server.get_sessions()) == 1, 10)
    assert len(server.get_sessions()) == 1
    await assert_agent_counter(agent, 2, 1)
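# assert_agent_counter(agent, n, m) is used throughout these tests. Judging
# from test_server_timeout (start -> (1, 0); server down -> (1, 1); reconnect
# -> (2, 1)), the two numbers appear to count sessions established and
# sessions lost. One plausible sketch, assuming hypothetical counters
# agent.sessions_created and agent.sessions_expired; the real attributes may
# be named differently.
async def assert_agent_counter(agent, connected: int, disconnected: int) -> None:
    # Poll briefly so session bookkeeping can catch up, then fail on timeout.
    await retry_limited(
        lambda: agent.sessions_created == connected and agent.sessions_expired == disconnected,
        timeout=2,
    )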
def get_config(key, callback=None):
    cfg = configure()
    config_dir = cfg.get("general").get("config_dir")
    filename = os.path.join(config_dir, key + ".yml")
    if not os.path.exists(filename):
        return dict()
    with open(filename, "r") as stream:
        cfg = yaml.safe_load(stream)
    if callback is not None:
        subtask(callback).delay(cfg)
    return cfg
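# Example usage of get_config ("firewall" and apply_rules are hypothetical
# names; this assumes configure() returns a dict whose general.config_dir
# entry points at a directory of per-key YAML files):
#
#     cfg = get_config("firewall")                  # load firewall.yml, or {}
#     get_config("firewall", callback=apply_rules)  # also dispatch apply_rules(cfg) as a Celery subtask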
def install(): fetch("http://www.fastcgi.com/dist/fcgi-%(fcgi)s.tar.gz") extract("fcgi-%(fcgi)s.tar.gz") configure("fcgi-%(fcgi)s", ["--prefix=%s" % env.prefix]) make("fcgi-%(fcgi)s") make("fcgi-%(fcgi)s", "install")
def main():
    args = parser.parse_args()
    print(args)
    config = configure(args.config)

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # define model name
    setup_list = [
        args.src, args.tgt, args.network,
        f"contrast_dim_{args.contrast_dim}",
        f"temperature_{args.temperature}",
        f"alpha_{args.alpha}",
        f"cw_{args.cw}",
        f"thresh_{args.thresh}",
        f"max_key_size_{args.max_key_size}",
        f"min_conf_samples_{args.min_conf_samples}",
        f"gpu_{args.gpu}"
    ]
    model_name = "_".join(setup_list)
    print(colored(f"Model name: {model_name}", 'green'))

    model_dir = os.path.join(args.log_dir, model_name)
    if os.path.isdir(model_dir):
        shutil.rmtree(model_dir)
    os.mkdir(model_dir)
    summary_writer = SummaryWriter(model_dir)

    # save parsed arguments
    with open(os.path.join(model_dir, 'parsed_args.txt'), 'w') as f:
        json.dump(args.__dict__, f, indent=2)

    dataset_name = get_dataset_name(args.src, args.tgt)
    dataset_config = config.data.dataset[dataset_name]
    src_file = os.path.join(args.dataset_root, dataset_name, args.src + '_list.txt')
    tgt_file = os.path.join(args.dataset_root, dataset_name, args.tgt + '_list.txt')

    model = Model(base_net=args.network,
                  num_classes=dataset_config.num_classes,
                  contrast_dim=args.contrast_dim,
                  frozen_layer=args.frozen_layer)
    model_ema = Model(base_net=args.network,
                      num_classes=dataset_config.num_classes,
                      contrast_dim=args.contrast_dim,
                      frozen_layer=args.frozen_layer)
    moment_update(model, model_ema, 0)

    model = model.cuda()
    model_ema = model_ema.cuda()

    contrast_loss = InfoNCELoss(temperature=args.temperature).cuda()
    src_memory = KeyMemory(args.max_key_size, args.contrast_dim).cuda()
    tgt_memory = KeyMemory(args.max_key_size, args.contrast_dim).cuda()
    tgt_pseudo_labeler = KMeansPseudoLabeler(num_classes=dataset_config.num_classes,
                                             batch_size=args.pseudo_batch_size)

    parameters = model.get_parameter_list()
    group_ratios = [parameter['lr'] for parameter in parameters]
    optimizer = torch.optim.SGD(parameters,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=args.nesterov)

    assert args.lr_scheduler == 'inv'
    lr_scheduler = InvScheduler(gamma=args.gamma,
                                decay_rate=args.decay_rate,
                                group_ratios=group_ratios,
                                init_lr=args.lr)

    trainer = Train(model, model_ema, optimizer, lr_scheduler, model_dir,
                    summary_writer, src_file, tgt_file, contrast_loss,
                    src_memory, tgt_memory, tgt_pseudo_labeler,
                    cw=args.cw,
                    thresh=args.thresh,
                    min_conf_samples=args.min_conf_samples,
                    num_classes=dataset_config.num_classes,
                    batch_size=args.batch_size,
                    eval_batch_size=args.eval_batch_size,
                    num_workers=args.num_workers,
                    max_iter=args.max_iterations,
                    iters_per_epoch=dataset_config.iterations_per_epoch,
                    log_summary_interval=args.log_summary_interval,
                    log_image_interval=args.log_image_interval,
                    num_proj_samples=args.num_project_samples,
                    acc_metric=dataset_config.acc_metric,
                    alpha=args.alpha)
    tgt_best_acc = trainer.train()

    # write to text file
    # with open(args.acc_file, 'a') as f:
    #     f.write(model_name + ' ' + str(tgt_best_acc) + '\n')

    # write to csv file
    write_list = [
        args.src, args.tgt, args.network, args.contrast_dim, args.temperature,
        args.alpha, args.cw, args.thresh, args.max_key_size,
        args.min_conf_samples, args.gpu, tgt_best_acc
    ]
    with open(args.acc_file, 'a') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(write_list)
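# moment_update(model, model_ema, 0) above copies the online weights into the
# EMA model (momentum 0 keeps none of the old EMA state). A common MoCo-style
# implementation is sketched below; the project's own helper may differ in
# detail (e.g. it may also copy buffers).
import torch

def moment_update(model, model_ema, m):
    """Update the EMA model in place: p_ema = m * p_ema + (1 - m) * p."""
    with torch.no_grad():
        for p, p_ema in zip(model.parameters(), model_ema.parameters()):
            p_ema.mul_(m).add_(p, alpha=1 - m)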
def parse(self):
    self.opt = self.parser.parse_args()
    configure(self.opt)
    return self.opt
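# Typical usage of the options wrapper above (Options is a hypothetical name
# for the surrounding class, assumed to build self.parser in its constructor):
#
#     opt = Options().parse()   # parse CLI args, then apply configure(opt)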
def main():
    args = parser.parse_args()
    print(args)
    config = configure(args.config)

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    print(colored(f"Model directory: {args.model_dir}", 'green'))
    assert os.path.isfile(args.model_dir)

    dataset_name = get_dataset_name(args.src, args.tgt)
    dataset_config = config.data.dataset[dataset_name]
    src_file = os.path.join(args.dataset_root, dataset_name, args.src + '_list.txt')
    tgt_file = os.path.join(args.dataset_root, dataset_name, args.tgt + '_list.txt')

    model = Model(base_net=args.network,
                  num_classes=dataset_config.num_classes,
                  frozen_layer='')
    del model.classifier_layer
    del model.contrast_layer

    # load only the weights shared between the trained checkpoint and this model
    model_state_dict = model.state_dict()
    trained_state_dict = torch.load(args.model_dir)['weights']
    keys = set(model_state_dict.keys())
    trained_keys = set(trained_state_dict.keys())
    shared_keys = keys.intersection(trained_keys)
    to_load_state_dict = {key: trained_state_dict[key] for key in shared_keys}
    model.load_state_dict(to_load_state_dict)
    model = model.cuda()

    # source classifier and domain classifier
    src_classifier = nn.Sequential(
        nn.Dropout(0.5),
        nn.Linear(model.base_network.out_dim, dataset_config.num_classes))
    initialize_layer(src_classifier)
    parameter_list = [{"params": src_classifier.parameters(), "lr": 1}]
    src_classifier = src_classifier.cuda()

    domain_classifier = nn.Sequential(
        nn.Dropout(0.5),
        nn.Linear(model.base_network.out_dim, 2))
    initialize_layer(domain_classifier)
    parameter_list += [{"params": domain_classifier.parameters(), "lr": 1}]
    domain_classifier = domain_classifier.cuda()

    group_ratios = [parameter['lr'] for parameter in parameter_list]
    optimizer = torch.optim.SGD(parameter_list,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=args.nesterov)
    assert args.lr_scheduler == 'inv'
    lr_scheduler = InvScheduler(gamma=args.gamma,
                                decay_rate=args.decay_rate,
                                group_ratios=group_ratios,
                                init_lr=args.lr)

    # split into train and validation sets
    src_size = len(open(src_file).readlines())
    src_train_size = int(args.train_portion * src_size)
    src_train_indices, src_test_indices = np.split(np.random.permutation(src_size), [src_train_size])
    tgt_size = len(open(tgt_file).readlines())
    tgt_train_size = int(args.train_portion * tgt_size)
    tgt_train_indices, tgt_test_indices = np.split(np.random.permutation(tgt_size), [tgt_train_size])

    # define data loaders
    train_data_loader_kwargs = {
        'shuffle': True,
        'drop_last': True,
        'batch_size': args.batch_size,
        'num_workers': args.num_workers
    }
    test_data_loader_kwargs = {
        'shuffle': False,
        'drop_last': False,
        'batch_size': args.eval_batch_size,
        'num_workers': args.num_workers
    }
    train_transformer = get_transform(training=True)
    test_transformer = get_transform(training=False)

    data_loader = {}
    data_iterator = {}
    src_train_dataset = IndicesDataset(src_file, list(src_train_indices), transform=train_transformer)
    data_loader['src_train'] = DataLoader(src_train_dataset, **train_data_loader_kwargs)
    src_test_dataset = IndicesDataset(src_file, list(src_test_indices), transform=test_transformer)
    data_loader['src_test'] = DataLoader(src_test_dataset, **test_data_loader_kwargs)
    tgt_train_dataset = IndicesDataset(tgt_file, list(tgt_train_indices), transform=train_transformer)
    data_loader['tgt_train'] = DataLoader(tgt_train_dataset, **train_data_loader_kwargs)
    tgt_test_dataset = IndicesDataset(tgt_file, list(tgt_test_indices), transform=test_transformer)
    data_loader['tgt_test'] = DataLoader(tgt_test_dataset, **test_data_loader_kwargs)
    for key in data_loader:
        data_iterator[key] = iter(data_loader[key])

    # start training
    total_progress_bar = tqdm.tqdm(desc='Iterations', total=args.max_iterations,
                                   ascii=True, smoothing=0.01)
    class_criterion = nn.CrossEntropyLoss()
    model.base_network.eval()
    src_classifier.train()
    domain_classifier.train()

    iteration = 0
    while iteration < args.max_iterations:
        lr_scheduler.adjust_learning_rate(optimizer, iteration)
        optimizer.zero_grad()

        src_data = get_sample(data_loader, data_iterator, 'src_train')
        src_inputs, src_labels = src_data['image_1'].cuda(), src_data['true_label'].cuda()
        tgt_data = get_sample(data_loader, data_iterator, 'tgt_train')
        tgt_inputs = tgt_data['image_1'].cuda()

        # the frozen base network runs without gradients; only the two heads train
        model.set_bn_domain(domain=0)
        with torch.no_grad():
            src_features = model.base_network(src_inputs)
            src_features = F.normalize(src_features, p=2, dim=1)
        src_class_logits = src_classifier(src_features)
        src_domain_logits = domain_classifier(src_features)

        model.set_bn_domain(domain=1)
        with torch.no_grad():
            tgt_features = model.base_network(tgt_inputs)
            tgt_features = F.normalize(tgt_features, p=2, dim=1)
        tgt_domain_logits = domain_classifier(tgt_features)

        src_classification_loss = class_criterion(src_class_logits, src_labels)
        domain_logits = torch.cat([src_domain_logits, tgt_domain_logits], dim=0)
        domain_labels = torch.tensor([0] * src_inputs.size(0) + [1] * tgt_inputs.size(0)).cuda()
        domain_classification_loss = class_criterion(domain_logits, domain_labels)

        if iteration % args.print_acc_interval == 0:
            compute_accuracy(src_class_logits, src_labels,
                             acc_metric=dataset_config.acc_metric, print_result=True)
            compute_accuracy(domain_logits, domain_labels,
                             acc_metric='total_mean', print_result=True)

        total_loss = src_classification_loss + domain_classification_loss
        total_loss.backward()
        optimizer.step()

        iteration += 1
        total_progress_bar.update(1)

    # test
    model.base_network.eval()
    src_classifier.eval()
    domain_classifier.eval()
    with torch.no_grad():
        src_all_class_logits = []
        src_all_labels = []
        src_all_domain_logits = []
        model.set_bn_domain(domain=0)
        for src_test_data in tqdm.tqdm(data_loader['src_test'], desc='src_test',
                                       leave=False, ascii=True):
            src_test_inputs, src_test_labels = src_test_data['image_1'].cuda(), src_test_data['true_label'].cuda()
            src_test_features = model.base_network(src_test_inputs)
            src_test_features = F.normalize(src_test_features, p=2, dim=1)
            src_test_class_logits = src_classifier(src_test_features)
            src_test_domain_logits = domain_classifier(src_test_features)
            src_all_class_logits += [src_test_class_logits]
            src_all_labels += [src_test_labels]
            src_all_domain_logits += [src_test_domain_logits]
        src_all_class_logits = torch.cat(src_all_class_logits, dim=0)
        src_all_labels = torch.cat(src_all_labels, dim=0)
        src_all_domain_logits = torch.cat(src_all_domain_logits, dim=0)
        src_test_class_acc = compute_accuracy(src_all_class_logits, src_all_labels,
                                              acc_metric=dataset_config.acc_metric,
                                              print_result=True)
        src_test_domain_acc = compute_accuracy(src_all_domain_logits,
                                               torch.zeros(src_all_domain_logits.size(0)).cuda(),
                                               acc_metric='total_mean',
                                               print_result=True)

        tgt_all_domain_logits = []
        model.set_bn_domain(domain=1)
        for tgt_test_data in tqdm.tqdm(data_loader['tgt_test'], desc='tgt_test',
                                       leave=False, ascii=True):
            tgt_test_inputs = tgt_test_data['image_1'].cuda()
            tgt_test_features = model.base_network(tgt_test_inputs)
            tgt_test_features = F.normalize(tgt_test_features, p=2, dim=1)
            tgt_test_domain_logits = domain_classifier(tgt_test_features)
            tgt_all_domain_logits += [tgt_test_domain_logits]
        tgt_all_domain_logits = torch.cat(tgt_all_domain_logits, dim=0)
        tgt_test_domain_acc = compute_accuracy(tgt_all_domain_logits,
                                               torch.ones(tgt_all_domain_logits.size(0)).cuda(),
                                               acc_metric='total_mean',
                                               print_result=True)

    write_list = [args.model_dir, src_test_class_acc, src_test_domain_acc, tgt_test_domain_acc]
    # with open('hyper_search_office_home.csv', 'a') as f:
    with open(args.output_file, 'a') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(write_list)
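# get_sample(data_loader, data_iterator, key) above draws the next batch and
# transparently restarts an exhausted iterator. A minimal sketch of that
# helper, assuming this cycling behaviour is all it does:
def get_sample(data_loader, data_iterator, key):
    try:
        return next(data_iterator[key])
    except StopIteration:
        # Epoch finished: start a fresh pass over the loader.
        data_iterator[key] = iter(data_loader[key])
        return next(data_iterator[key])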
import os
import sys

from utils import configure

cfg = configure()

BASE_DIR = os.path.dirname(__file__)
MODULES_DIR = os.path.join(BASE_DIR, "mods")
sys.path.append(BASE_DIR)

BROKER_BACKEND = "amqp"
BROKER_HOST = cfg.get("amqp").get("host", "localhost")
BROKER_PORT = cfg.get("amqp").get("port", 5672)
BROKER_USER = cfg.get("amqp").get("username", "guest")
BROKER_PASSWORD = cfg.get("amqp").get("password", "guest")
BROKER_VHOST = cfg.get("amqp").get("virtual_host", "")

CELERY_RESULT_BACKEND = "amqp"
CELERY_AMQP_TASK_RESULT_EXPIRES = 300
#CELERY_CACHE_BACKEND = "memcached://127.0.0.1:11211/"

CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "domestos"
CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
CELERY_ROUTES = ("routers.DomestosRouter", )

task_imports = ["mods.%s" % (m[:-3])
                for m in os.listdir(MODULES_DIR)
                if m.endswith(".py")
                and not m.startswith("__init__")]
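# The configure() imported from utils above is presumably the same loader that
# get_config() builds on. A minimal sketch, assuming it simply reads one YAML
# file into a dict; the default path is hypothetical:
import yaml

def configure(path="config.yml"):
    with open(path) as stream:
        return yaml.safe_load(stream)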
def main():
    args = parser.parse_args()
    print(args)
    config = configure(args.config)

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    print(colored(f"Model directory: {args.model_dir}", 'green'))
    assert os.path.isfile(args.model_dir)

    dataset_name = get_dataset_name(args.src, args.tgt)
    dataset_config = config.data.dataset[dataset_name]
    tgt_file = os.path.join(args.dataset_root, dataset_name, args.tgt + '_list.txt')

    # tgt classification
    model = Model(base_net=args.network,
                  num_classes=dataset_config.num_classes,
                  frozen_layer='')
    del model.classifier_layer
    del model.contrast_layer

    model_state_dict = model.state_dict()
    trained_state_dict = torch.load(args.model_dir)['weights']
    keys = set(model_state_dict.keys())
    trained_keys = set(trained_state_dict.keys())
    shared_keys = keys.intersection(trained_keys)
    to_load_state_dict = {key: trained_state_dict[key] for key in shared_keys}
    model.load_state_dict(to_load_state_dict)
    model = model.cuda()

    # define data loader
    test_data_loader_kwargs = {
        'shuffle': False,
        'drop_last': False,
        'batch_size': args.batch_size,
        'num_workers': args.num_workers
    }
    test_transformer = get_transform(training=False)
    data_loader = {}
    data_iterator = {}
    tgt_test_dataset = DefaultDataset(tgt_file, transform=test_transformer)
    data_loader['tgt_test'] = DataLoader(tgt_test_dataset, **test_data_loader_kwargs)
    for key in data_loader:
        data_iterator[key] = iter(data_loader[key])

    # test
    model.base_network.eval()
    with torch.no_grad():
        tgt_all_features = []
        tgt_all_labels = []
        model.set_bn_domain(domain=1)
        for tgt_test_data in tqdm.tqdm(data_loader['tgt_test'], desc='tgt_test',
                                       leave=False, ascii=True):
            tgt_test_inputs, tgt_test_labels = tgt_test_data['image_1'].cuda(), tgt_test_data['true_label'].cuda()
            tgt_test_features = model.base_network(tgt_test_inputs)
            # tgt_test_features = F.normalize(tgt_test_features, p=2, dim=1)
            tgt_all_features += [tgt_test_features]
            tgt_all_labels += [tgt_test_labels]
        tgt_all_features = torch.cat(tgt_all_features, dim=0)
        tgt_all_labels = torch.cat(tgt_all_labels, dim=0)
        tgt_all_features = tgt_all_features.cpu().numpy()
        tgt_all_labels = tgt_all_labels.cpu().numpy()

    features_dir = os.path.join('features', args.src + '_' + args.tgt)
    os.makedirs(features_dir, exist_ok=True)  # make sure the output directory exists
    features_pickle_file = os.path.join(features_dir, 'tgt_features.pkl')
    labels_pickle_file = os.path.join(features_dir, 'tgt_labels.pkl')
    with open(features_pickle_file, 'wb') as f:
        pickle.dump(tgt_all_features, f)
    with open(labels_pickle_file, 'wb') as f:
        pickle.dump(tgt_all_labels, f)
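# The pickled features can later be reloaded for analysis; the amazon/webcam
# pair below is only an illustrative src/tgt combination:
#
#     import pickle
#     with open('features/amazon_webcam/tgt_features.pkl', 'rb') as f:
#         features = pickle.load(f)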
"""
Test framework sources used to perform the tests required by the paper:
"Classifying Process Instances Using Recurrent Neural Networks"
by Markku Hinkka, Teemu Lehto, Keijo Heljanko and Alexander Jung
"""
import lasagne
from lasagne.layers import *
import numpy as np
import theano
import theano.tensor as T
import time
import operator
from utils import load_traces, generate_traces, draw_charts, configure
from model import Model, trace_registry
import matplotlib.pyplot as plt

configure(output_path="C:/Users/User/Dropbox/Aalto/testing/testruns/")

filePath = "D:/dev/aalto/papers/nn-predictions/src/"
trace_registry["bpic14_dur"] = lambda trace_length_modifier: load_traces(
    "bpic14_dur", filePath + "rabobank.csv",
    lambda row: row[1] == "1", trace_length_modifier, 40000)
trace_registry["bpic14_rfi"] = lambda trace_length_modifier: load_traces(
    "bpic14_rfi", filePath + "rabobank.csv",
    lambda row: row[2] == "request for information", trace_length_modifier, 40000)
trace_registry["bpic12_dur"] = lambda trace_length_modifier: load_traces(
    "bpic12_dur", filePath + "BPIC12.csv",
    lambda row: row[1] == "1", trace_length_modifier)
trace_registry["bpic13_dur"] = lambda trace_length_modifier: load_traces(
    "bpic13_dur", filePath + "BPIC13.csv",
    lambda row: row[1] == "1", trace_length_modifier)
trace_registry["bpic17_dur"] = lambda trace_length_modifier: load_traces(