def main(args):
    with open("./log.txt", "a") as myfile:
        myfile.write("\n \n \n \n {}".format(args))
    args = set_args(args)
    best_encoder, best_decoder, best_prior, best_varflow, dataset = train_vae(args=args)
    with torch.no_grad():
        plot_prior(args=args, flows=best_prior)
        # pdb.set_trace()
        plot_vardistr(args=args, dataset=dataset, flows=best_varflow,
                      encoder=best_encoder)
    with open("./log.txt", "a") as myfile:
        myfile.write("!!Success!! \n \n \n \n")
    print('Success!')
def main(): """Main workflow""" args = utils.build_args(argparse.ArgumentParser()) utils.init_logger(args.model_file) assert torch.cuda.is_available() torch.cuda.set_device(args.gpuid) utils.init_random(args.seed) utils.set_params(args) logger.info("Config:\n%s", pformat(vars(args))) fields = utils.build_fields() logger.info("Fields: %s", fields.keys()) logger.info("Load %s", args.train_file) train_data = LMDataset(fields, args.train_file, args.sent_length_trunc) logger.info("Training sentences: %d", len(train_data)) logger.info("Load %s", args.valid_file) val_data = LMDataset(fields, args.valid_file, args.sent_length_trunc) logger.info("Validation sentences: %d", len(val_data)) fields["sent"].build_vocab(train_data) train_iter = utils.build_dataset_iter(train_data, args) val_iter = utils.build_dataset_iter(val_data, args, train=False) if args.resume and os.path.isfile(args.checkpoint_file): logger.info("Resume training") logger.info("Load checkpoint %s", args.checkpoint_file) checkpoint = torch.load(args.checkpoint_file, map_location=lambda storage, loc: storage) es_stats = checkpoint["es_stats"] args = utils.set_args(args, checkpoint) else: checkpoint = None es_stats = ESStatistics(args) model = utils.build_model(fields, args, checkpoint) logger.info("Model:\n%s", model) optimizer = utils.build_optimizer(model, args, checkpoint) try_train_val(fields, model, optimizer, train_iter, val_iter, es_stats, args)
def parse_yaml(self):
    self.args = utils.set_args('artifact', setting)
    utils.set_output_dir(self.args)
    utils.set_logger(self.args)
    # Dispatch each YAML document on the type of its first source entry.
    with open(self.yaml_file, "r") as stream:
        for data in yaml.safe_load_all(stream):
            data_type = data["sources"][0]["type"]
            if eq(data_type, "FILE"):
                self.leave_file_log(data)
            elif eq(data_type, "COMMAND"):
                self.leave_command_log(data)
    self.compress_data(self.log_root_dir)
    shutil.rmtree(self.log_root_dir, ignore_errors=False, onerror=None)
    self.check_alive()
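# A hedged sketch of the YAML layout parse_yaml() appears to expect, inferred
# from the data["sources"][0]["type"] lookup above. Only "sources" and "type"
# come from the snippet; the remaining keys are illustrative assumptions:
#
#   sources:
#     - type: FILE          # routed to leave_file_log()
#       path: /var/log/syslog
#   ---
#   sources:
#     - type: COMMAND       # routed to leave_command_log()
#       command: netstat -an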
def home():
    subjects, emotions = generator.load_assets('static/images/icons')
    if request.method == 'GET':
        session['text'] = []
        args, values = utils.set_args()
        return render_template('index.html',
                               emotions=emotions,
                               subjects=sorted(subjects.keys()),
                               images=None,
                               combined=None,
                               settings=args,
                               values=values,
                               error=None,
                               n_inputs=3)
    elif request.method == 'POST':
        all_text = []
        for idx in range(3):
            text_input = request.form.getlist('text' + str(idx + 1))
            all_text.append(text_input)
        # Reuse the cached predictions when every input is unchanged or empty.
        if np.all(np.logical_or(np.array(all_text) == np.array(session['text']),
                                np.array(all_text) == '')):
            subjects_render = session['subjects']
            emotions_render = session['emotions']
        else:
            subjects_render, emotions_render = parser.predict(all_text)
            session['text'] = all_text
            session['subjects'] = subjects_render
            session['emotions'] = emotions_render
        subjects_render = [x if all_text[i][0] != '' else ['']
                           for i, x in enumerate(subjects_render)]
        args, values = utils.get_args()
        error, images_encoded, combined_encoded = None, [], None
        try:
            motifs, combined = generator.generate_visual(icons=subjects,
                                                         colors=emotions,
                                                         topics=subjects_render,
                                                         emotions=emotions_render,
                                                         out_dir=None,
                                                         **args)
            for motif in motifs:
                if motif is None:
                    images_encoded.append('')
                else:
                    # Trim the bottom 20 pixel rows before encoding.
                    images_encoded.append(utils.img_to_str(motif[:-20, ...]))
            combined_encoded = utils.img_to_str(combined)
        except Exception:
            error = ('Sorry, there was an error generating motifs for the '
                     'provided inputs. Please try again.')
        subjects_render = [['topic unknown'] if x[0] is None else x
                           for x in subjects_render]
        subjects_render = [x for i, x in enumerate(subjects_render)
                           if all_text[i][0] != '']
        emotions_render = [x for i, x in enumerate(emotions_render)
                           if all_text[i][0] != '']
        return render_template('index.html',
                               emotions=emotions,
                               subjects=sorted(subjects.keys()),
                               images=images_encoded,
                               combined=combined_encoded,
                               settings=args,
                               values=values,
                               error=error,
                               emot_labels=emotions_render,
                               subj_labels=subjects_render,
                               n_inputs=3)
def api_call():
    # Expects calls in the following format:
    # {'key': api_key,
    #  'text': [..., ..., ...],
    #  'algorithm': 'watercolors',  # individual style args...
    #  ...
    # }
    if (not request.json or 'key' not in request.json
            or request.json['key'] != app.config['API_KEY']):
        abort(400)
    subjects, emotions = generator.load_assets('static/images/icons')
    all_text = request.json.get('text', [])
    subjects_render, emotions_render = parser.predict(all_text)
    subjects_render = [x if all_text[i][0] != '' else ['']
                       for i, x in enumerate(subjects_render)]
    # Start from the default settings and overlay any caller-supplied args.
    args, _ = utils.set_args()
    spec_args = request.json.get('args', {})
    for arg, val in spec_args.items():
        args[arg] = val
    error, images_encoded = None, []
    try:
        motifs, combined = generator.generate_visual(icons=subjects,
                                                     colors=emotions,
                                                     topics=subjects_render,
                                                     emotions=emotions_render,
                                                     out_dir=None,
                                                     **args)
        for motif in motifs:
            if motif is None:
                images_encoded.append('')
            else:
                images_encoded.append(utils.img_to_str(motif[:-20, ...], web=False))
        combined_encoded = utils.img_to_str(combined, web=False)
    except Exception:
        error = ('Sorry, there was an error generating motifs for the '
                 'provided inputs. Please try again.')
        return jsonify({'error': error})
    subjects_render = [['topic unknown'] if x[0] is None else x
                       for x in subjects_render]
    subjects_render = [x for i, x in enumerate(subjects_render)
                       if all_text[i][0] != '']
    emotions_render = [x for i, x in enumerate(emotions_render)
                       if all_text[i][0] != '']
    outputs = {'topics': subjects_render,
               'emotions': emotions_render,
               'motifs': images_encoded,
               'combined_motifs': combined_encoded}
    return jsonify(outputs)
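# Hedged client sketch for api_call() above. The URL and route are
# placeholders (the route decorator is not part of this snippet), and the
# key must match whatever app.config['API_KEY'] holds; payload keys follow
# the format documented at the top of api_call():
import requests

payload = {
    'key': 'MY_API_KEY',
    'text': [['a calm walk in the forest'], [''], ['']],
    'args': {'algorithm': 'watercolors'},
}
resp = requests.post('http://localhost:5000/api_call', json=payload)
print(resp.json())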
import os
import os.path as osp
import sys

import numpy as np
from tabulate import tabulate
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from torch.distributions import Bernoulli

from utils import Logger, read_json, write_json, save_checkpoint, set_args
from models import *
from rewards import compute_reward
import vsum_tools

args = set_args()

torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu:
    use_gpu = False

# python main.py -d datasets/eccv16_dataset_summe_google_pool5.h5 -s datasets/summe_splits.json \
#     -m summe --gpu 0 --save-dir log/summe-split0 --split-id 0 --verbose


def main():
    if not args.evaluate:
        # Mirror stdout into a training log file.
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
def parser_arg():
    parser = argparse.ArgumentParser()
    ##
    parser.add_argument('--exp_name', type=str, default='',
                        help="the name of experiment")
    parser.add_argument('-g', '--gpu', type=int, dest='gpu', metavar='N',
                        default=0, help="gpu")
    parser.add_argument('--num_threads', type=int, default=4, metavar='N',
                        help="the number of threads (default: 4)")
    parser.add_argument('--seed', type=int, default=0, metavar='N',
                        help='seed number. if 0, do not fix seed (default: 0)')
    parser.add_argument('--resume', type=str, default='', help='resume path')
    parser.add_argument('--dataset', type=str, default='CIFAR100', help='dataset',
                        choices=['CIFAR10', 'CIFAR100', 'CUB200'])

    ## hyper-parameters
    parser.add_argument('--method', type=str, default='BaseMethod', metavar='METHOD',
                        choices=METHOD_NAMES,
                        help='model_names: ' + ' | '.join(METHOD_NAMES) +
                        ' (default: BaseMethod)')
    parser.add_argument('--backbone', type=str, default='resnet18', metavar='BACKBONE',
                        choices=BACKBONE_NAMES,
                        help='Backbone models: ' + ' | '.join(BACKBONE_NAMES) +
                        ' (default: resnet18)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help="epoch (default: 200)")
    parser.add_argument('--batch_size', type=int, default=128, metavar='N',
                        help="batch size (default: 128)")
    parser.add_argument('-t', type=float, default=3.0,
                        help="temperature (default: 3.0)")
    parser.add_argument('-p', type=float, default=0.5,
                        help="the probability of dropout (default: 0.5)")
    parser.add_argument('-a', '--alpha', type=float, default=0.0,
                        help="the balanced weight between losses (default: 0.0)")
    parser.add_argument('--beta', type=float, default=0.0,
                        help="the weight for KD loss (default: 0.0)")
    parser.add_argument('--eta', type=int, default=0,
                        help="T_max value of Cosine Annealing weight for KD loss (default: 0)")
    parser.add_argument('--woAug', dest='aug', action='store_false',
                        help="data augmentation or not (default: True)")
    parser.add_argument('--lambda_byot', type=float, default=0.,
                        help="hyperparameter of the BYOT loss")

    ## debug
    # args, _ = parser.parse_known_args('-g 0 --exp_name debug --seed 777 \
    #     --backbone resnet18_cifar --method SelfKD_KL --dataset CIFAR100 \
    #     --batch_size 128 --num_threads 4'.split())
    ## real
    args, _ = parser.parse_known_args()
    return set_args(args)
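# Minimal usage sketch for parser_arg(). set_args() is assumed to post-process
# the parsed namespace (e.g. seeding, output paths); its body is not shown in
# this snippet:
if __name__ == '__main__':
    args = parser_arg()
    print(args.method, args.backbone, args.epochs, args.batch_size)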
def main():
    if os.geteuid() != 0:
        print('This program should be run as root.')
        sys.exit(-1)

    selection = select_menu()
    if selection == '1':
        network = Net_Collector()
        network.start_collecting()
        del network
        artifacts = Artifact_Collector()
        artifacts.start_collecting()
        del artifacts
        cron = Set_Compressor()
        cron.set_cron()
        del cron
    elif selection == '2':
        try:
            if os.path.isfile(net_pid_lock):
                with open(net_pid_lock, 'r') as f:
                    net_pid = f.read()
                print(net_pid)
                os.kill(int(net_pid), signal.SIGKILL)
                os.remove(net_pid_lock)
            if os.path.isfile(artifact_pid_lock):
                with open(artifact_pid_lock, 'r') as f:
                    artifact_pid = f.read()
                print(artifact_pid)
                os.kill(int(artifact_pid), signal.SIGKILL)
                os.remove(artifact_pid_lock)
        except Exception:
            print("error on canceling")
    elif selection == '3':
        artifacts = Artifact_Collector()
        artifacts.parse_yaml()
        del artifacts
    elif selection == '4':
        args = utils.set_args('dump', setting)
        utils.set_output_dir(args)
        utils.set_logger(args)
        c = Dump(args)
        # Invoke every public collector method on the Dump instance.
        for attr in dir(c):
            if attr != 'args' and not attr.startswith('_'):
                getattr(c, attr)()
        utils.set_zip_evidences(args)
    elif selection == '5':
        os.system('python /etc/makolli/compressor.py 0')
    elif selection == '6':
        s = Snort_Set()
        s.start_snort()
    elif selection == '7':
        print("\nbye!!\n")
        sys.exit()
    else:
        print("\n****************")
        print("wrong selection")
        print("****************\n")