def main():
    # Parse the JSON arguments
    config_args = None
    try:
        config_args = parse_args()
    except Exception as e:
        print(e)
        print("Add a config file using '--config file_name.json'")
        exit(1)

    tf.reset_default_graph()

    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=config_args.num_envs,
                            inter_op_parallelism_threads=config_args.num_envs)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Prepare directories
    config_args.experiment_dir, config_args.summary_dir, config_args.checkpoint_dir, \
        config_args.output_dir, config_args.test_dir = \
        create_experiment_dirs(config_args.experiment_dir)

    a2c = A2C(sess, config_args)

    if config_args.to_train:
        a2c.train()
    if config_args.to_test:
        a2c.test(total_timesteps=10000000)
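Nearly every snippet in this collection calls a project-specific `parse_args` helper that is never shown. For the JSON-config usage above, a minimal sketch of what such a helper might look like follows; the `--config` flag is taken from the error message, everything else is an assumption:

import argparse
import json
from types import SimpleNamespace


def parse_args():
    # Hypothetical sketch: read --config and expose the JSON fields as
    # attributes (config_args.num_envs, config_args.to_train, ...).
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True,
                        help='path to a JSON config file, e.g. file_name.json')
    cli_args = parser.parse_args()
    with open(cli_args.config) as f:
        return SimpleNamespace(**json.load(f))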
def main():
    # Parse the JSON arguments
    config_args = None
    try:
        config_args = parse_args()
    except Exception:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    tf.reset_default_graph()

    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=config_args.num_envs,
                            inter_op_parallelism_threads=config_args.num_envs)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Prepare directories
    # TODO: add openai logger
    config_args.experiment_dir, config_args.summary_dir, config_args.checkpoint_dir, \
        config_args.output_dir, config_args.test_dir = \
        create_experiment_dirs(config_args.experiment_dir)

    logger.configure(config_args.experiment_dir)
    logger.info("Printing configuration ...")
    logger.info(config_args)

    acktr = ACKTR(sess, config_args)

    if config_args.to_train:
        acktr.train()
    if config_args.to_test:
        acktr.test(total_timesteps=10000000)
def main():
    args = parse_args()
    # print(args.file, args.number, args.save)
    config = get_config(args)
    gen_example = gen(config)
    gen_example.run()

    # Clean up temporary files
    command = 'rm -r ./tmps'
    os.system(command)
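Shelling out to `rm` is Unix-specific; a hedged alternative (a suggestion, not the original author's code) is the standard-library equivalent:

import shutil

# Portable, exception-tolerant equivalent of `rm -r ./tmps`
shutil.rmtree('./tmps', ignore_errors=True)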
def main():
    args = utils.parse_args()
    config = utils.load_config_file(args.config_file)

    # Parse available execution options
    if config['action']['execute'] == 'download_dataset':
        wikihow = Wikihow.Wikihow(config)
        wikihow.download()
    elif config['action']['execute'] == 'action_identifier':
        action_identifier = ActionIdentifier.ActionIdentifier(config)
        action_identifier.run()
    elif config['action']['execute'] == 'dataset_statistics':
        wikihow = Wikihow.Wikihow(config)
        wikihow.get_statistics()
    else:
        print("Invalid execute parameter in config file {}".format(args.config_file))
        exit()
from utils.utils import parse_args, import_model, PerceptualLoss, setting_cuda
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau
from data_loader import TrainDataset, Valid_Dataset
from torch import nn
import os
import random
import scipy.misc
import torch
import numpy as np
import collections

parser = parse_args()


def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


# Set random number seed
setup_seed(66)

# Preprocess and load data
transformations = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize([0.44], [0.26])])
import os
from subprocess import Popen, PIPE

from utils import utils
from utils import preprocess
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.models import load_model
from keras.optimizers import Adam
from models.multi_gpu import ModelMGPU
from models.losses import *
from models.unet import unet

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

if __name__ == "__main__":
    results = utils.parse_args("train")

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        # find maximum number of available GPUs
        call = "nvidia-smi --list-gpus"
        pipe = Popen(call, shell=True, stdout=PIPE).stdout
        available_gpus = pipe.read().decode().splitlines()
        NUM_GPUS = len(available_gpus)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    num_channels = results.num_channels
    plane = results.plane
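The nvidia-smi probe above reappears in several of the scripts below; as a side note, the same logic could be factored into a helper (a hedged sketch, not part of any of these repositories):

from subprocess import Popen, PIPE


def count_available_gpus():
    # Count GPUs by parsing one line per device from `nvidia-smi --list-gpus`
    pipe = Popen("nvidia-smi --list-gpus", shell=True, stdout=PIPE).stdout
    return len(pipe.read().decode().splitlines())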
import logging
import os
import sys

from common_crawl.CassandraSentences import CassandraSentences
from utils.utils import parse_args

if __name__ == "__main__":
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(filename='cc_log.txt',
                        format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.DEBUG)
    logger.info("running %s" % ' '.join(sys.argv))

    # check and process input arguments
    args = parse_args(sys.argv[1:])
    for arg in ['input', 'output']:
        if arg not in args:
            logger.error('Argument ' + arg + ' is missing')
            sys.exit(1)
    limit, workers, min_count, size, input, output = args['limit'], args['workers'], args['min_count'], \
        args['size'], args['input'], args['output']

    # log arguments
    logger.info('Training with: ' + ' '.join([k + " : " + str(v) for k, v in args.items()]))

    # import data from cassandra
    sentences = CassandraSentences(input, 'ngramspace', limit)
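The snippet stops before the training call; given the argument names (size, min_count, workers), a plausible continuation is gensim's Word2Vec, sketched here as an assumption rather than this repository's actual code:

from gensim.models import Word2Vec

# Hypothetical continuation: the argument names map onto gensim's Word2Vec
# (in gensim >= 4 the old `size` parameter is called `vector_size`)
model = Word2Vec(sentences, vector_size=size, min_count=min_count, workers=workers)
model.save(output)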
display_step = args.display_step
batch_size = args.batch_size

# train_data, test_data, n_user, n_item = load_data_rating(path="../Data/ml100k/movielens_100k.dat", ...)
# train_data, test_data, n_user, n_item = load_data_rating(path="../Data/ml1m/ratings_date_dire_t.dat", ...)
train_data, test_data, n_user, n_item, n_dire = load_data_rating_menu_dire_neg_pos(
    trainpath="../data/ml1m/train_1m_ratings.dat",
    testpath="../data/ml1m/test_1m_ratings.dat",
    header=['user_id', 'item_id', 'rating', 'timestamp', 'dire_thistime', 'dire_allnum',
            'dire_index', 'dire_name', 'pos', 'neg', 'scoreseq'],
    test_size=0.1,
    sep="\t")

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    model = None
    # Model selection
    if args.model == "MLP_dire":
        args1 = utils.parse_args()
        model = MLP_dire(args1, n_user, n_item, 47)
    if args.model == "MF":
        model = MF_manu_dire_neg_pos(sess, n_user, n_item, 47, batch_size=batch_size)
    if args.model == "NNMF":
        model = NNMF(sess, n_user, n_item, learning_rate=learning_rate)
    if args.model == "NRR":
        model = NRR(sess, n_user, n_item)
    if args.model == "I-AutoRec":
        model = IAutoRec(sess, n_user, n_item)
    if args.model == "U-AutoRec":
        model = UAutoRec(sess, n_user, n_item)
import os
import shutil
from subprocess import Popen, PIPE

from utils import utils
from utils import preprocess
from utils.save_figures import *
from utils.apply_model import apply_model_single_input
from utils.pad import pad_image
from keras.models import load_model
from keras import backend as K
from models.losses import *

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'

if __name__ == "__main__":

    ######################## COMMAND LINE ARGUMENTS ########################
    results = utils.parse_args("multiseg")
    num_channels = results.num_channels

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        # find maximum number of available GPUs
        call = "nvidia-smi --list-gpus"
        pipe = Popen(call, shell=True, stdout=PIPE).stdout
        available_gpus = pipe.read().decode().splitlines()
        NUM_GPUS = len(available_gpus)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    model_filename = results.weights
import os
from subprocess import Popen, PIPE

import nibabel as nib

from utils import utils
from utils import preprocess
from utils.save_figures import *
from utils.apply_model import apply_model_single_input
from utils.pad import pad_image
from keras.models import load_model
from keras import backend as K
from models.losses import *

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'

if __name__ == "__main__":

    ######################## COMMAND LINE ARGUMENTS ########################
    results = utils.parse_args("validate")
    num_channels = results.num_channels

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        # find maximum number of available GPUs
        call = "nvidia-smi --list-gpus"
        pipe = Popen(call, shell=True, stdout=PIPE).stdout
        available_gpus = pipe.read().decode().splitlines()
        NUM_GPUS = len(available_gpus)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    model_filename = results.weights
def main():
    global args
    args = parse_args()
    train_net(args)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Charles on 19-3-18
# Function:
import time

from apscheduler.schedulers.background import BackgroundScheduler

from config.config import logger
from utils.utils import parse_args

env, task_start_mode, interval = parse_args()
from api import clean_environment, update_results, start_all_new_tasks, restart_all_tasks


def dispatch_test():
    import time
    from start_worker import app
    account_list = [("*****@*****.**", "nYGcEXNjGY")]
    for i in range(3):
        for acc in account_list:
            app.send_task('tasks.feed_account.feed_account', args=(acc[0], acc[1]),
                          queue='feed_account_queue', routing_key='for_feed_account')
        time.sleep(600)


def run(task_start_mode='new', update_interval=60):
    """
    Start the task scheduling system.
    :param task_start_mode: 'new': clear the cache and start from scratch;
        'restart': resume from the previous stopping point, and any previously
        unfinished tasks will continue to be executed
    :param update_interval: result update period, 30 seconds by default
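The body of run() is cut off above; for context, the imported BackgroundScheduler is typically wired up along these lines (a sketch of standard APScheduler usage, not this file's actual code):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
# Poll for results every `update_interval` seconds, using names from the snippet above
scheduler.add_job(update_results, 'interval', seconds=update_interval)
scheduler.start()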
import os
import json
import tempfile

import numpy as np
from sklearn.utils import shuffle

from models.phinet import phinet
from utils.utils import load_data, now, parse_args, preprocess, get_classes, load_image, record_results
from keras.models import load_model, model_from_json
from keras import backend as K
from keras.engine import Input, Model

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

if __name__ == '__main__':

    ############### DIRECTORIES ###############
    results = parse_args("test")
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    with open(results.model) as json_data:
        model = model_from_json(json.load(json_data))
    model.load_weights(results.weights)

    CUR_DIR = os.path.abspath(os.path.expanduser(os.path.dirname(__file__)))
    REORIENT_SCRIPT_PATH = os.path.join(CUR_DIR, "utils", "reorient.sh")
    ROBUSTFOV_SCRIPT_PATH = os.path.join(CUR_DIR, "utils", "robustfov.sh")

    TMP_DIR = tempfile.mkdtemp()
    results.PREPROCESSED_DIR = tempfile.mkdtemp()
import os

from utils import utils
from utils.pad import pad_image

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'


def get_file_id(filename):
    # Strip the "_mask"/"_thresh" suffix to recover the subject ID
    f = os.path.basename(filename)
    if "mask" in f:
        return f[:f.find("_mask")]
    else:
        return f[:f.find("_thresh")]


if __name__ == "__main__":
    # get all mask filenames from GROUND_TRUTH_DIR
    results = utils.parse_args("calc_dice")
    GROUND_TRUTH_DIR = results.GT_DATA_DIR
    gt_ids = [get_file_id(x) for x in os.listdir(GROUND_TRUTH_DIR)]

    # interpret either an individual file or a provided directory
    in_data = results.IN_DATA

    # if directory, then get all predicted thresholded mask filenames and
    # compute Dice for the files which are present in the GT dir
    if os.path.isdir(in_data):
        # used only for printing result to console
        mean_dice = 0
        pred_vols = []
        gt_vols = []
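The Dice computation itself lies below the cutoff; for reference, a minimal sketch of the Dice coefficient on binary masks (an assumption about, not a copy of, this repository's implementation):

import numpy as np


def dice_coefficient(pred, gt):
    # Dice = 2*|A ∩ B| / (|A| + |B|) for boolean masks A and B
    pred = np.asarray(pred, dtype=bool)
    gt = np.asarray(gt, dtype=bool)
    denom = pred.sum() + gt.sum()
    if denom == 0:
        return 1.0  # both masks empty: count as perfect agreement
    return 2.0 * np.logical_and(pred, gt).sum() / denom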
import os

from models.phinet import phinet, phinet_2D
from models.multi_gpu import ModelMGPU
from utils.nifti_image import NIfTIImageDataGenerator
from utils.augmentations import *
from utils.utils import parse_args, now

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

if __name__ == '__main__':

    ############### DIRECTORIES ###############
    results = parse_args("train")

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        NUM_GPUS = 3
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    TRAIN_DIR = os.path.abspath(os.path.expanduser(results.TRAIN_DIR))
    VAL_DIR = os.path.abspath(os.path.expanduser(results.VAL_DIR))
    classes = results.classes.replace(" ", "").split(',')
    WEIGHT_DIR = os.path.abspath(os.path.expanduser(results.OUT_DIR))
import os
import shutil
from subprocess import Popen, PIPE

from utils import utils
from utils import preprocess
from utils.save_figures import *
from utils.apply_model import apply_model_single_input
from utils.pad import pad_image
from keras.models import load_model
from keras import backend as K
from models.losses import *

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'

if __name__ == "__main__":

    ######################## COMMAND LINE ARGUMENTS ########################
    results = utils.parse_args("test")
    num_channels = results.num_channels

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        # find maximum number of available GPUs
        call = "nvidia-smi --list-gpus"
        pipe = Popen(call, shell=True, stdout=PIPE).stdout
        available_gpus = pipe.read().decode().splitlines()
        NUM_GPUS = len(available_gpus)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    model_filename = results.weights
import pprint

import pandas as pd
import torch
import torchvision.transforms as transforms
from torch.utils.data import RandomSampler, ConcatDataset

from utils import utils
from utils.data_transforms import Unit, Resample
from utils.dataset import MMFit, SequentialStridedSampler
from model.conv_ae import ConvAutoencoder
from model.multimodal_ae import MultimodalAutoencoder
from model.multimodal_ar import MultimodalFcClassifier

################
# Configuration
################
args = utils.parse_args()
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(args))

torch.backends.cudnn.benchmark = True

ACTIONS = ['squats', 'lunges', 'bicep_curls', 'situps', 'pushups', 'tricep_extensions',
           'dumbbell_rows', 'jumping_jacks', 'dumbbell_shoulder_press',
           'lateral_shoulder_raises', 'non_activity']
TRAIN_W_IDs = ['01', '02', '03', '04', '06', '07', '08', '16', '17', '18']
VAL_W_IDs = ['14', '15', '19']
if args.unseen_test_set:
    TEST_W_IDs = ['00', '05', '12', '13', '20']
else:
    TEST_W_IDs = ['09', '10', '11']

# All modalities available in MM-Fit
MODALITIES = ['sw_l_acc', 'sw_l_gyr', 'sw_l_hr', 'sw_r_acc', 'sw_r_gyr', 'sw_r_hr',
              'sp_l_acc', 'sp_l_gyr', 'sp_l_mag', 'sp_r_acc', 'sp_r_gyr', 'sp_r_mag',
              'eb_l_acc', 'eb_l_gyr', 'pose_2d', 'pose_3d']
import os
import shutil
import json
from operator import itemgetter
from datetime import datetime

import numpy as np
from sklearn.utils import shuffle

from models.phinet import phinet
from utils.utils import load_data, now, parse_args, preprocess_dir, get_classes, load_image, record_results
from keras.models import load_model, model_from_json
from keras import backend as K

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

if __name__ == '__main__':

    ############### DIRECTORIES ###############
    results = parse_args("validate")
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    VAL_DIR = os.path.abspath(os.path.expanduser(results.VAL_DIR))
    CUR_DIR = os.path.abspath(os.path.expanduser(os.path.dirname(__file__)))
    REORIENT_SCRIPT_PATH = os.path.join(CUR_DIR, "utils", "reorient.sh")
    ROBUSTFOV_SCRIPT_PATH = os.path.join(CUR_DIR, "utils", "robustfov.sh")
    PREPROCESSED_DIR = os.path.join(VAL_DIR, "preprocess")
    if not os.path.exists(PREPROCESSED_DIR):
        os.makedirs(PREPROCESSED_DIR)
"""
@author: antonio
"""
import warnings
from time import time

from utils.utils import parse_args, warning_on_one_line, copy_dir_structure
from normalize_files import normalize_files
from filter_files import filter_files
from concat_files import concat_files
from deduplicate import deduplicate

warnings.formatwarning = warning_on_one_line

if __name__ == '__main__':
    in_path, out_path, is_concat = parse_args()
    '''in_path = '/home/antonio/Documents/Projects/BERT/prepro/data/toy_data/wiki'
    data_type = 'txt'
    target_lang = 'es'
    '''

    ### Replicate directory structure
    copy_dir_structure('../data/', '../output/')

    ############# 0. ALL files to plain text, UTF-8 encoding #############
    print('\n\nTransforming files to plain UTF-8 text...\n\n')
    out_path_txt = normalize_files(in_path, is_concat)  # TODO: need to write this

    ############# 1. FILTERING #############
    print(