def get_script_arguments():
    parser = argparse.ArgumentParser(description='Detect the faces on many images.')
    parser.add_argument('--images_dir', required=True,
                        help='The directory with the images.')
    parser.add_argument('--output_filename', required=True,
                        help='The filename that will contain the face detection results.')
    parser.add_argument('--batch_size', default=8, type=int,
                        help='Batch size used in the model.')
    parser.add_argument('--extension', default='png',
                        help='Extension of the images. Default is png.')
    args = get_arguments(parser)
    logger.info('Script inputs: {}.'.format(args))
    return args
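# Several snippets in this collection pass the parser to a shared
# get_arguments(parser) helper that is not shown. A minimal sketch of what it
# might do, assuming it only parses and returns the CLI namespace (the helper
# body is an assumption, not the original implementation):
import argparse

def get_arguments(parser: argparse.ArgumentParser) -> argparse.Namespace:
    # Hypothetical helper: parse sys.argv and hand back the namespace;
    # real versions may also validate inputs or configure logging.
    return parser.parse_args()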
def __init__(self, in_channels: int, num_classes: int = 1) -> None:
    super().__init__()
    self.input_args = get_arguments()
    self.layers = nn.Sequential(
        ConvBlock(in_channels, 128, 7, 1),
        ConvBlock(128, 256, 5, 1),
        ConvBlock(256, 128, 3, 1),
    )
    self.final = nn.Linear(128, num_classes)
def get_script_arguments():
    parser = argparse.ArgumentParser(
        description='Copy the listed files into an NVIDIA DIGITS directory.')
    parser.add_argument('--files_to_copy', required=True,
                        help='TXT file with absolute file paths. One line per path.')
    parser.add_argument('--output_dir', required=True,
                        help='The name of the NVIDIA DIGITS directory.')
    args = get_arguments(parser)
    logger.info('Script inputs: {}.'.format(args))
    return args
def __init__(self, address='localhost', port=DEFAULT_PORT, user=None,
             password=None, http_handler=None, timeout=None):
    if isinstance(timeout, (integer_types, float)):
        self._query_timeout = float(timeout)
    else:
        self._query_timeout = DEFAULT_TIMEOUT
    urlo = urlparse(address)
    if urlo.scheme == '':
        base_url = 'http://' + address + ':' + str(port)
        self.url = base_url + '/transmission/rpc'
    else:
        if urlo.port:
            self.url = urlo.scheme + '://' + urlo.hostname + ':' + \
                str(urlo.port) + urlo.path
        else:
            self.url = urlo.scheme + '://' + urlo.hostname + urlo.path
        LOGGER.info('Using custom URL "' + self.url + '".')
        if urlo.username and urlo.password:
            user = urlo.username
            password = urlo.password
        elif urlo.username or urlo.password:
            LOGGER.warning('Either user or password missing, not using '
                           'authentication.')
    if http_handler is None:
        self.http_handler = DefaultHTTPHandler()
    else:
        if hasattr(http_handler, 'set_authentication') and \
                hasattr(http_handler, 'request'):
            self.http_handler = http_handler
        else:
            raise ValueError('Invalid HTTP handler.')
    if user and password:
        self.http_handler.set_authentication(self.url, user, password)
    elif user or password:
        LOGGER.warning('Either user or password missing, not using '
                       'authentication.')
    self._sequence = 0
    self.session = None
    self.session_id = 0
    self.server_version = None
    self.protocol_version = None
    self.get_session()
    self.torrent_get_arguments = get_arguments('torrent-get',
                                               self.rpc_version)
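# A hedged usage sketch of the URL handling above: a bare host gets the
# default scheme, port, and '/transmission/rpc' path appended, while a full
# URL is used as given. The class name Client is an assumption for
# illustration; only __init__ is shown in the source.
client = Client('localhost', port=9091)
# -> client.url == 'http://localhost:9091/transmission/rpc'
client = Client('https://seedbox.example.com:8080/custom/rpc')
# -> client.url == 'https://seedbox.example.com:8080/custom/rpc'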
def main():
    args = get_arguments()
    try:
        # directories returns: {'logdir': logdir, 'restore_from': restore_from}
        directories = validate_directories(args)
    except ValueError as e:
        print("Some arguments are wrong:")
        print(str(e))
        return

    if args.l2_regularization_strength == 0:
        # the flag defaults to 0, which disables regularization
        args.l2_regularization_strength = None

    train(directories, args)  # at this point data_dir = test_dir
    return
def get_script_arguments():
    parser = argparse.ArgumentParser(
        description='Resize the images in a directory.')
    parser.add_argument('--input_dir', required=True,
                        help='The directory with the images.')
    parser.add_argument('--output_dir', required=True,
                        help='The output directory for the resized images.')
    parser.add_argument('--resize_dims', default='512,512',
                        help='The resize dimensions. Default is (512, 512).')
    parser.add_argument('--extension', default='png',
                        help='Extension of the images. Default is png.')
    args = get_arguments(parser)
    logger.info('Script inputs: {}.'.format(args))
    return args
def __init__(self, c_in, c_out, bottleneck=32, ks=40, nb_filters=32,
             residual=True, depth=6):
    super().__init__()
    self.input_args = get_arguments()
    self.block = InceptionBlock(c_in, bottleneck=bottleneck, ks=ks,
                                nb_filters=nb_filters, residual=residual,
                                depth=depth)
    self.gap = torch.nn.AdaptiveAvgPool1d(1)
    self.fc = torch.nn.Linear(nb_filters * 4, c_out)
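# A hedged usage sketch for the inception-style module above. The class name
# InceptionTime and its forward pass (block -> gap -> squeeze -> fc) are
# assumptions for illustration; only __init__ is shown in the source.
import torch

model = InceptionTime(c_in=3, c_out=5)  # 3 input channels, 5 output classes
x = torch.randn(16, 3, 128)             # (batch, channels, sequence length)
# The block presumably concatenates 4 branches of nb_filters channels each,
# so the pooled output has nb_filters * 4 = 128 features, matching self.fc.
y = model(x)                            # expected shape: (16, 5)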
def __init__(self, in_channels: int, mid_channels: int = 64,
             num_pred_classes: int = 1) -> None:
    super().__init__()

    # for easier saving and loading
    self.input_args = {
        'in_channels': in_channels,
        'mid_channels': mid_channels,
        'num_pred_classes': num_pred_classes,
    }

    self.layers = nn.Sequential(*[
        ResNetBlock(in_channels=in_channels, out_channels=mid_channels),
        ResNetBlock(in_channels=mid_channels, out_channels=mid_channels * 2),
        ResNetBlock(in_channels=mid_channels * 2, out_channels=mid_channels * 2),
    ])
    self.final = nn.Linear(mid_channels * 2, num_pred_classes)
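# Why input_args is recorded above: a checkpoint can then rebuild the model
# without re-supplying constructor arguments. A minimal sketch of that round
# trip; the class name ResNetClassifier and the file name are assumptions.
import torch

checkpoint = {'input_args': model.input_args, 'state_dict': model.state_dict()}
torch.save(checkpoint, 'resnet_classifier.pt')  # hypothetical path

ckpt = torch.load('resnet_classifier.pt')
model = ResNetClassifier(**ckpt['input_args'])  # assumed class name
model.load_state_dict(ckpt['state_dict'])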
def __init__(
    self,
    hidden_size: int,
    in_channels: int,
    num_layers: int = 1,
    num_classes: int = 1,
    dropout: float = 0,
    rnn_dropout: float = 0,
    bidirectional: bool = False,
) -> None:
    super().__init__()
    self.input_args = get_arguments()

    self.fcn = nn.Sequential(
        ConvBlock(in_channels, 128, 8, 1),
        ConvBlock(128, 256, 5, 1),
        ConvBlock(256, 128, 3, 1),
    )
    # Halve the per-direction size so the bidirectional output keeps the
    # requested overall width.
    self.hidden_size = hidden_size // (1 + bidirectional)
    self.lstm = nn.LSTM(
        1,
        self.hidden_size,
        num_layers,
        batch_first=True,
        # nn.LSTM only applies dropout between stacked layers, so it is
        # zeroed out when there is a single layer.
        dropout=(num_layers > 1) * rnn_dropout,
        bidirectional=bidirectional,
    )
    self.final = nn.Linear(128 + self.hidden_size * (1 + bidirectional),
                           num_classes)
    self.dropout = dropout
    self.bidirectional = bidirectional
    self.num_layers = num_layers
def main():
    local_css(
        "/home/pasonatech/workspace/albumentations_forked/albumentations-demo/src/custom_css.css"
    )

    # get CLI params: the path to images and image width
    path_to_images, width_original = get_arguments()

    if not os.path.isdir(path_to_images):
        st.title("There is no directory: " + path_to_images)
    else:
        # select interface type
        interface_type = st.sidebar.radio(
            "Select the interface mode",
            ["Simple", "Professional", "Custom", "LoadMyFile"])

        # pick css
        if interface_type == "LoadMyFile":
            local_css(
                "/home/pasonatech/workspace/albumentations_forked/albumentations-demo/src/custom_loadmy_css.css"
            )

        if interface_type == "Custom":
            json_file_name = st.sidebar.text_input("Insert Json File Name",
                                                   "aug_file")
            json_file_name = os.path.join("./my_json_files",
                                          json_file_name + ".json")

        # select image
        status, image = select_image(path_to_images, interface_type)
        if status == 1:
            st.title("Can't load image")
        elif status == 2:
            st.title("Please, upload the image")
        else:
            # image was loaded successfully
            placeholder_params = get_placeholder_params(image)

            # load the config
            augmentations = load_augmentations_config(
                placeholder_params, "configs/augmentations.json")

            if interface_type != "LoadMyFile":
                # get the list of transformations names
                transform_names = select_transformations(augmentations,
                                                         interface_type)

            if interface_type == "Custom":
                transforms = get_transormations_params_custom(
                    transform_names, augmentations, json_file_name)
            elif interface_type == "LoadMyFile":
                f_name = st.sidebar.file_uploader("Select your json file",
                                                  type="json")
                view_times = 0
                if f_name:
                    j_text = f_name.read()  # was: StringIO.read(f_name)
                    j_data = json.loads(j_text)
                    image_replace = st.empty()
                    st.image(image, caption="Original image",
                             width=width_original)
                    if st.sidebar.button("Play Preview"):
                        view_times = 1
                    stop_btn = st.sidebar.button("STOP Preview")
                    if stop_btn:
                        view_times = 0
                    try:
                        transform = A.from_dict(j_data)
                        display_value = True
                    except KeyError:
                        st.error("Please, confirm your augmentations structure.")
                        st.error("Supports only albumentations augmentation "
                                 "generated by 'A.to_dict()'.")
                        display_value = False

                    while view_times == 1:
                        try:
                            aug_img_obj = transform(image=image)
                            aug_img = aug_img_obj["image"]
                            image_replace.image(
                                aug_img,
                                caption="Transformed image",
                                width=width_original,
                            )
                        except IOError:
                            st.error("Confirm your json file path.")
                            view_times = 0
                        except UnboundLocalError:
                            st.error("Your json file seems incompatible "
                                     "to run this task.")
                            view_times = 0
                        except ValueError as e:
                            # replaces error log in same field
                            image_replace.error(e)
                        time.sleep(1)
                        if stop_btn:
                            st.info("Preview Stopped. Press Play Preview "
                                    "button to resume previewing.")
                    if display_value:
                        if st.sidebar.checkbox("Display Augmentation Parameters"):
                            onetine_data_loader(j_data)
                    transforms = []
                else:
                    st.header("WELCOME")
                    st.header("Please upload a JSON File")
            else:
                # get parameters for each transform
                transforms = get_transormations_params(transform_names,
                                                       augmentations)

            if interface_type != "LoadMyFile":
                try:
                    # apply the transformation to the image
                    data = A.ReplayCompose(transforms)(image=image)
                    error = 0
                except ValueError:
                    error = 1
                    st.title(
                        "The error has occurred. Most probably you have passed "
                        "wrong set of parameters. Check transforms that change "
                        "the shape of image.")

                # proceed only if everything is ok
                if error == 0:
                    augmented_image = data["image"]
                    # show title
                    st.title("Demo of Albumentations")

                    # show the images
                    width_transformed = int(width_original / image.shape[1] *
                                            augmented_image.shape[1])
                    st.image(image, caption="Original image",
                             width=width_original)
                    st.image(
                        augmented_image,
                        caption="Transformed image",
                        width=width_transformed,
                    )

                    # comment about refreshing
                    st.write("*Press 'R' to refresh*")

                    # custom preview of aug list;
                    # random values used to get transformations
                    show_random_params(data, interface_type)

                    for transform in transforms:
                        show_docstring(transform)
                        st.code(str(transform))
                    show_credentials()

                    # adding google analytics pixel
                    # only when deployed online. don't collect statistics of local usage
                    if "GA" in os.environ:
                        st.image(os.environ["GA"])
                        st.markdown(
                            "[Privacy policy]"
                            "(https://htmlpreview.github.io/?"
                            "https://github.com/IliaLarchenko/"
                            "albumentations-demo/blob/deploy/docs/privacy.html)")
def main():
    # get CLI params: the path to images and image width
    path_to_images, width_original = get_arguments()

    if not os.path.isdir(path_to_images):
        st.title("There is no directory: " + path_to_images)
    else:
        # select interface type
        interface_type = st.sidebar.radio("Select the interface mode",
                                          ["Simple", "Professional"])

        # select image
        status, image = select_image(path_to_images, interface_type)
        if status == 1:
            st.title("Can't load image")
        elif status == 2:
            st.title("Please, upload the image")
        else:
            # image was loaded successfully
            placeholder_params = get_placeholder_params(image)

            # load the config
            augmentations = load_augmentations_config(
                placeholder_params, "configs/augmentations.json")

            # get the list of transformations names
            transform_names = select_transformations(augmentations,
                                                     interface_type)

            # get parameters for each transform
            transforms = get_transormations_params(transform_names,
                                                   augmentations)

            try:
                # apply the transformation to the image
                data = A.ReplayCompose(transforms)(image=image)
                error = 0
            except ValueError:
                error = 1
                st.title(
                    "The error has occurred. Most probably you have passed "
                    "wrong set of parameters. Check transforms that change "
                    "the shape of image.")

            # proceed only if everything is ok
            if error == 0:
                augmented_image = data["image"]
                # show title
                st.title("Demo of Albumentations")

                # show the images
                width_transformed = int(width_original / image.shape[1] *
                                        augmented_image.shape[1])
                st.image(image, caption="Original image", width=width_original)
                st.image(
                    augmented_image,
                    caption="Transformed image",
                    width=width_transformed,
                )

                # random values used to get transformations
                show_random_params(data, interface_type)

                # print additional info
                for transform in transforms:
                    show_docstring(transform)
                    st.code(str(transform))
                show_credentials()
from env import R2RBatch
from refer360_env import Refer360Batch
from utils import Tokenizer, read_vocab
from vocab import TRAIN_VOCAB
from train import make_arg_parser
from utils import get_arguments
from pprint import pprint
import os

arg_parser = make_arg_parser()
arg_parser.add_argument('--cache_path', type=str, required=True)
args = get_arguments(arg_parser)

vocab = read_vocab(TRAIN_VOCAB, args.language)
tok = Tokenizer(vocab)

if args.env == 'r2r':
    EnvBatch = R2RBatch
elif args.env in ['refer360']:
    EnvBatch = Refer360Batch

if args.prefix in ['refer360', 'r2r', 'R2R', 'REVERIE', 'r360tiny',
                   'RxR_en-ALL']:
    val_splits = ['val_unseen', 'val_seen']
    target = 'val_unseen'
elif args.prefix in ['touchdown', 'td']:
    val_splits = ['dev']
    target = 'dev'

env = EnvBatch(['none'], splits=['train'] + val_splits, tokenizer=tok,
               args=args)

if args.env == 'r2r':
    (^                 # Numbers separated by whitespace only
    #\s*\d{3}          # Line starts with spaces (optional) then 3 digits (pressure levels)
    \s*\d{2,}
    [^\n]              # should not be a new line after 3 digits
    [-\d\.\s]{10,}
    [\s\S]*
    $)
    ''', re.VERBOSE | re.MULTILINE)

    variables, values = pattern.search(text).groups()
    variables = variables.split("\n")
    variables = [v for v in variables if len(v) > 0]
    values = values.split('\n')

    cleaned_values = []
    for v in values:
        # Remove leading spaces. Note: str.lstrip('File') strips any of the
        # characters F, i, l, e, not the literal word "File".
        v = v.lstrip('File').lstrip()
        if len(v) == 0:
            continue
        # don't split text values (contains any letter that is not the E
        # used in exponential numbers)
        if re.search('(?!e)(?!E)[a-zA-Z]', v):
            cleaned_values.append(v)
        else:
            cleaned_values.extend(v.split())

    constants = dict(zip(variables, cleaned_values))
    return constants


if __name__ == "__main__":
    # Get the input files from the arguments
    files = get_arguments()
    convert_ozonedata(files)
    ]
}}
]
}}
'''


def add_arguments_cb(arg_parser):
    arg_parser.add_argument('--ecdsa_cert', action="store_true",
                            default=False, help='0/1')


if __name__ == '__main__':
    CmdArgs = utils.get_arguments("cps test", add_arguments_cb)

    cfg_j_pre = '''{
    "zones" : [
    '''
    cfg_j_post = '''
    ]
}'''

    cfg_j = ''
    for i in range(CmdArgs.zones):
        if i:
            cfg_j = cfg_j + ','
subprocess.call(["git", "init"], cwd=path) subprocess.call(["git", "add", "."], cwd=path) except: print "Making conda-git-deployment into git repository failed." shutil.rmtree(tempdir) if __name__ == "__main__": # Install git if its not available if not utils.check_executable("git"): subprocess.call(["conda", "install", "-c", "anaconda", "git", "-y"]) # Git initialise initialise_git() # Git update if (utils.get_arguments()["update-environment"] or utils.get_arguments()["update-repositories"]): update() # Execute install args = [ "python", os.path.join(os.path.dirname(__file__), "environment.py") ] args.extend(sys.argv[1:]) subprocess.call(args)
import os
import utils
import sound
import image
import timeit

libraries = [
    'sys', 'os', 'time', 'numpy', 'struct', 'PIL', 'hashlib', 'pydub'
]
utils.checkImports(libraries)

args_parser = utils.setup_cli()
config = utils.get_arguments(args_parser)

utils.clearDirectory(config['outputPath'])

inputFilesList = utils.listFilesInPath(config['inputPath'])
numberOfFiles = len(inputFilesList)
numberOfDoneFiles = 0

if config['action'] == "encode":
    if config['fileType'] == "image":
        for file in inputFilesList:
            filename = file.split(".")[0]
            fileContent = utils.readFileContent(config['inputPath'], file)
            if config['encoding'] == "ascii":
                preContent = image.convertAsciiToNumbers(
                    config["a"], config["b"], fileContent)
            else:
def batch_experiments(
    manifesto: str,
    dry_run: bool = False,
    allow_duplicate_name: bool = False,
    task: str = "weight_poisoning",
    host: Optional[str] = None,
    ignore_errors: bool = False,
):
    if not hasattr(run_experiment, task):
        raise ValueError(f"Run experiment has no task {task}, "
                         "please check for spelling mistakes")
    trn_func = getattr(run_experiment, task)

    with open(manifesto, "rt") as f:
        settings = yaml.load(f, Loader=yaml.FullLoader)
    default_params = settings.pop("default")
    weight_dump_prefix = settings.pop("weight_dump_prefix")
    default_experiment_name = default_params.get("experiment_name", "sst")

    for name, vals in settings.items():
        if not isinstance(vals, dict):
            print(f"Skipping {name} with vals {vals}")
            continue
        experiment_name = vals.get("experiment_name", default_experiment_name)
        if run_exists(name, experiment_name=experiment_name):
            # Make sure we don't run the same run twice, unless we want to
            if not allow_duplicate_name:
                warnings.warn(f"Run with name {experiment_name}/{name} "
                              "already exists, skipping")
                continue
            else:
                warnings.warn(f"Run with name {experiment_name}/{name} already "
                              "exists, adding new run with duplicate name")

        # Construct params: create a deep copy to prevent any sharing
        params = dict(default_params)
        _update_params(params, _inherit(settings, vals, set([name])))
        _update_params(params, vals)
        if "inherits" in params:
            params.pop("inherits")
        if params.pop("skip", False):
            print(f"Skipping {name} since `skip=True` was specified")
            continue
        if "name" in get_arguments(trn_func):
            params["name"] = name
        if "weight_dump_dir" in get_arguments(trn_func):
            params["weight_dump_dir"] = os.path.join(weight_dump_prefix, name)
        elif "log_dir" in get_arguments(trn_func) and "eval" not in task:
            params["log_dir"] = os.path.join(weight_dump_prefix, name)

        # meta parameter for aggregating results
        if "table_entry" in params:
            params.pop("table_entry")

        print(f"Running {name} with {params}")
        if not dry_run:
            # Save params to a temporary yaml file
            _dump_params(params)
            # Run single experiment
            with ExceptionHandler(ignore=ignore_errors):
                run("python batch_experiments.py single "
                    f"--fname _tmp.yaml --task {task}")
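# batch_experiments above probes get_arguments(trn_func) for accepted
# parameter names before injecting `name` and output-directory arguments.
# A minimal sketch of such an introspection helper, assuming it only needs
# the parameter names (the real implementation is not shown in the source):
import inspect
from typing import Callable, Set

def get_arguments(func: Callable) -> Set[str]:
    # Return the parameter names `func` accepts, so callers can test
    # membership, e.g. "weight_dump_dir" in get_arguments(trn_func).
    return set(inspect.signature(func).parameters)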
def main():
    # get CLI params: the path to images and image width
    path_to_images, width_original = get_arguments()

    if not os.path.isdir(path_to_images):
        st.title("There is no directory: " + path_to_images)
    else:
        # select interface type
        interface_type = st.sidebar.radio(
            "Select the interface mode",
            ["Simple", "Professional", "Ultimate"], 2)

        # select image
        status, image = select_image(path_to_images, interface_type)
        if status == 1:
            st.title("Can't load image")
        elif status == 2:
            st.title("Please, upload the image")
        else:
            # image was loaded successfully
            placeholder_params = get_placeholder_params(image)

            # load the config
            augmentations = load_augmentations_config(
                placeholder_params, "configs/augmentations.json")

            # get the list of transformations names
            transform_names = select_transformations(augmentations,
                                                     interface_type)

            # get parameters for each transform
            transforms = get_transormations_params(transform_names,
                                                   augmentations)

            try:
                # apply the transformation to the image
                data = A.ReplayCompose(transforms)(image=image)
                error = 0
            except ValueError:
                error = 1
                st.title(
                    "The error has occurred. Most probably you have passed "
                    "wrong set of parameters. Check transforms that change "
                    "the shape of image.")

            # proceed only if everything is ok
            if error == 0:
                augmented_image = data["image"]
                # show title
                st.title("Demo of Albumentations")

                # show the images
                width_transformed = int(width_original / image.shape[1] *
                                        augmented_image.shape[1])
                st.image(image, caption="Original image", width=width_original)
                st.image(
                    augmented_image,
                    caption="Transformed image",
                    width=width_transformed,
                )

                # comment about refreshing
                st.write("*Press 'R' to refresh*")

                # random values used to get transformations
                show_random_params(data, interface_type)

                # print additional info
                for transform in transforms:
                    show_docstring(transform)
                    st.code(str(transform))
                show_credentials()

                # adding google analytics pixel
                # only when deployed online. don't collect statistics of local usage
                if "GA" in os.environ:
                    st.image(os.environ["GA"])
                    st.markdown(
                        "[Privacy policy]"
                        "(https://htmlpreview.github.io/?"
                        "https://github.com/IliaLarchenko/"
                        "albumentations-demo/blob/deploy/docs/privacy.html)")
def main():
    cuda_available = torch.cuda.is_available()
    train_params, dataset_params = get_arguments()

    net = AutoEncoder()
    epoch_trained = 0
    if train_params['restore_model']:
        net = load_model(net, train_params['restore_dir'],
                         train_params['restore_model'])
        if net is None:
            print("Initialize network and train from scratch.")
            net = AutoEncoder()
        else:
            epoch_trained = 0

    train_loader, validation = audio_data_loader(**dataset_params)

    if cuda_available is False:
        warnings.warn("Cuda is not available, can not train model "
                      "using multi-gpu.")

    if cuda_available:
        # Remove train_params["device_ids"] for single GPU
        if train_params["device_ids"]:
            batch_size = dataset_params["batch_size"]
            num_gpu = len(train_params["device_ids"])
            assert batch_size % num_gpu == 0
            net = nn.DataParallel(net, device_ids=train_params['device_ids'])
        torch.backends.cudnn.benchmark = True
        net = net.cuda()

    criterion = nn.MSELoss()
    optimizer = get_optimizer(net, train_params['optimizer'],
                              train_params['learning_rate'],
                              train_params['momentum'])
    if cuda_available:
        criterion = criterion.cuda()

    if not os.path.exists(train_params['log_dir']):
        os.makedirs(train_params['log_dir'])
    if not os.path.exists(train_params['restore_dir']):
        os.makedirs(train_params['restore_dir'])

    train_loss_log_file = open(train_params['log_dir'] + 'train_loss_log.log',
                               'a')
    test_loss_log_file = open(train_params['log_dir'] + 'test_loss_log.log',
                              'a')

    # Log the start of training
    time = str(datetime.now())
    line = 'Training Started at ' + time + ' !!! \n'
    train_loss_log_file.writelines(line)
    train_loss_log_file.flush()

    # Keep track of losses
    train_losses = []
    eval_losses = []
    best_eval = float('inf')

    # Begin!
    for epoch in range(train_params['num_epochs']):
        train(net, criterion, optimizer, train_losses, train_params,
              train_loss_log_file, train_loader, cuda_available)
        eval_loss = evaluate(net, criterion, epoch, eval_losses, validation,
                             test_loss_log_file, cuda_available)
        if eval_loss < best_eval:
            save_model(net, 1, train_params['restore_dir'])
            torch.save(net.state_dict(),
                       train_params['restore_dir'] + 'bestmodel.pth')
            best_eval = eval_loss
        save_model(net, epoch_trained + epoch + 1, train_params['restore_dir'])
        torch.save([train_losses, eval_losses, epoch],
                   train_params['restore_dir'] + 'data_params')

    # Log the end of training
    time = str(datetime.now())
    line = 'Training Ended at ' + time + ' !!! \n'
    train_loss_log_file.writelines(line)
    train_loss_log_file.flush()
    train_loss_log_file.close()
    test_loss_log_file.close()
        else:
            l2_dynamic_option = ''
        return ' '.join([
            cpu_options, l1cache_options, l2cache_options, memory_options,
            l1_mshr_options, l1_write_buffer_options, l2_dynamic_option
        ])

    def get_dynamic_option_command(self, dynamic_option):
        return utils.DYNAMIC_OPTION_COMMANDS[
            self.args.dynamic_option_name] + dynamic_option


def run_experiment(dynamic_option, experiment_manager, args):
    experiment_manager.launch(dynamic_option)


if __name__ == "__main__":
    args = utils.get_arguments()
    utils.create_result_directories(args.app_name)
    experiment_manager = ExperimentManager()
    if args.dynamic_option_name:
        method_with_args = partial(run_experiment,
                                   experiment_manager=experiment_manager,
                                   args=args)
        with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
            executor.map(method_with_args,
                         utils.DYNAMIC_OPTIONS[args.dynamic_option_name])
    else:
        experiment_manager.launch()
from models import (MLP, SVM, MLP_GMM, GAN, LogReg, MLP_max_diff, Stacking,
                    StackingSimple, LSTM_model, CharacterCNN)
from models.light_gbm_model import LightGbmWithLogReg
from nltk import ConfusionMatrix
from sklearn.model_selection import StratifiedKFold
import numpy as np
from metrics.breach_evaluator import evaluate, computeMeasures
from sklearn.metrics import confusion_matrix
from chunkers import get_sentences

TRAINING_DIR = 'data/training'
TRAINING_EXTERNAL_FILE = 'data/feather/external_stackexchange_feather'
VALIDATION_DIR = 'data/validation'
BREACH_DIR = 'data/breach'

input_dir, output_dir = get_arguments()
TEST_DIR = input_dir or 'data/validation'
OUTPUT_DIR = output_dir or 'data/output'

# average text length = 4329
# average tokens count = 863


def main(estimator=MLP,
         cv_split=5,
         with_cross_validation=True,
         with_validation=False,
         with_test=False,
         with_external_data=False,
         validate_on_external=False,
         with_grid_search=False,
def console_main():
    (URL, no_to_download, format_list, download_path, max_filesize,
     dump_urls, use_ghost) = get_arguments()
    print("\n ImageScraper\n ============\n Requesting page....\n")

    page_html, page_url = get_html(URL, use_ghost)
    images = get_img_list(page_html, page_url, format_list)

    if len(images) == 0:
        sys.exit("Sorry, no images found.")
    if no_to_download == 0:
        no_to_download = len(images)

    print("Found %s images: " % len(images))

    process_download_path(download_path)

    for img_url in images:
        if dump_urls:
            print(img_url)

    count = 0
    percent = 0.0
    failed = 0
    over_max_filesize = 0
    widgets = [
        'Progress: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
        ' ', ETA(), ' ', FileTransferSpeed()
    ]
    pbar = ProgressBar(widgets=widgets, maxval=100).start()

    for img_url in images:
        flag, size_flag = download_image(img_url, download_path, max_filesize)
        if not flag:
            if not size_flag:
                failed += 1
            else:
                over_max_filesize += 1
        count += 1
        percent = percent + 100.0 / no_to_download
        pbar.update(percent % 100)
        if count == no_to_download:
            break
    pbar.finish()

    print("\nDone!\nDownloaded %s images" % (count - failed - over_max_filesize))
    return
def main():
    conf = utils.read_yaml(utils.get_arguments()["unknown"][0])
    os.remove(utils.get_arguments()["unknown"][0])

    # Clone repositories. Using os.getcwd() because the drive letter needs to
    # be respected on Windows.
    repositories_path = os.path.abspath(
        os.path.join(os.getcwd(), "repositories", conf["name"]))

    os.environ["CONDA_ENVIRONMENT_REPOSITORIES"] = repositories_path

    # Kept for backwards compatibility
    os.environ["CONDA_GIT_REPOSITORY"] = repositories_path

    repositories = []
    for item in conf["dependencies"]:
        if "git" in item:
            for repo in item["git"]:
                repo_path = ""
                if isinstance(repo, str):
                    repo_path = repo
                if isinstance(repo, dict):
                    repo_path = list(repo.keys())[0]

                data = {"url": repo_path}

                name = repo_path.split("/")[-1].replace(".git", "")
                if not name:
                    name = repo_path.split("/")[-2]
                if "@" in name:
                    name = name.split("@")[0]
                    repo_path = repo_path.split("@")[0]
                data["name"] = name

                if not os.path.exists(repositories_path):
                    os.makedirs(repositories_path)

                if name not in os.listdir(repositories_path):
                    subprocess.call(["git", "clone", repo_path],
                                    cwd=repositories_path)

                data["path"] = os.path.join(repositories_path, name)

                data["commands"] = {"on_launch": [],
                                    "on_environment_update": []}
                if isinstance(repo, dict):
                    for item in repo[list(repo.keys())[0]]:
                        if isinstance(item, dict):
                            for event, commands in item.items():
                                data["commands"][event].extend(commands)
                        else:
                            data["commands"]["on_launch"].append(item)

                repositories.append(data)

    # Update repositories.
    if utils.get_arguments()["update-repositories"]:
        for repo in repositories:
            print(repo["name"])

            # Updating origin url
            subprocess.call(["git", "remote", "set-url", "origin",
                             repo["url"].split("@")[0]], cwd=repo["path"])

            # Update git repository
            subprocess.call(["git", "checkout", "master"], cwd=repo["path"])
            subprocess.call(["git", "pull"], cwd=repo["path"])
            subprocess.call(["git", "submodule", "update", "--init",
                             "--recursive"], cwd=repo["path"])
            subprocess.call(["git", "submodule", "update", "--recursive"],
                            cwd=repo["path"])

    # Checkout any commits/tags if there are newly cloned repositories or
    # updating the repositories.
    if utils.get_arguments()["update-repositories"]:
        for repo in repositories:
            if "@" in repo["url"]:
                tag = repo["url"].split("@")[1]
                if tag:
                    print(repo["name"])
                    subprocess.call(["git", "checkout", tag],
                                    cwd=repo["path"])

    # Checkout environment repository
    environment_path = utils.get_environment()
    if not os.path.exists(environment_path):
        # Determine environment repositories by matching passed environment
        # with repositories
        environment_repo = None
        match = 0.0
        for repo in repositories:
            sequence_match = SequenceMatcher(None, repo["url"],
                                             environment_path).ratio()
            if match < sequence_match:
                environment_repo = repo

        print(environment_repo["name"])
        branch = environment_path.split("/")[-2]
        subprocess.call(["git", "checkout", branch],
                        cwd=environment_repo["path"])

    # Install any setup.py if we are updating
    if (utils.get_arguments()["update-repositories"] or
            utils.get_arguments()["update-environment"]):
        for repo in repositories:
            if "setup.py" not in os.listdir(repo["path"]):
                continue
            args = ["python", "setup.py", "develop"]
            subprocess.call(args, cwd=repo["path"])

    # Add environment site packages to os.environ
    prefix = ""
    if platform.system().lower() == "windows":
        prefix = os.environ["CONDA_PREFIX"]
    else:
        prefix = os.environ["CONDA_ENV_PATH"]

    path = os.path.join(prefix, "lib", "site-packages")
    os.environ["PYTHONPATH"] += os.pathsep + path

    # Add sys.path to os.environ["PYTHONPATH"], because conda only modifies
    # sys.path which gets lost when launching any detached subprocesses.
    # This gets a little complicated due to being in a process that hasn't
    # picked up on the changes, hence going through a subprocess.
    python_file = os.path.join(os.path.dirname(__file__), "write_sys_path.py")
    data_file = os.path.join(tempfile.gettempdir(),
                             "data_%s.yml" % os.getpid())
    subprocess.call(["python", python_file, data_file])

    paths = []
    with open(data_file, "r") as f:
        paths += utils.read_yaml(f.read())
    os.remove(data_file)

    for path in paths:
        if path.lower().startswith(repositories_path.lower()):
            os.environ["PYTHONPATH"] += os.pathsep + path
        if path.endswith(".egg"):
            os.environ["PYTHONPATH"] += os.pathsep + path

    # Clean up any existing environment file
    if os.path.exists(utils.get_environment_path()):
        os.remove(utils.get_environment_path())

    # Ensure subprocess is detached so closing connect will not also
    # close launched applications.
    options = {}
    if not utils.get_arguments()["attached"]:
        if sys.platform == "win32":
            options["creationflags"] = subprocess.CREATE_NEW_CONSOLE
        else:
            options["preexec_fn"] = os.setsid

    # Setting update mode environment variable
    update_modes = []
    if utils.get_arguments()["update-environment"]:
        update_modes.append("environment")
    if utils.get_arguments()["update-repositories"]:
        update_modes.append("repositories")

    os.environ["CONDA_GIT_UPDATE"] = ""
    for mode in update_modes:
        os.environ["CONDA_GIT_UPDATE"] += mode + os.pathsep

    # Execute environment update commands.
    if utils.get_arguments()["update-environment"]:
        for repo in repositories:
            if "commands" in repo.keys():
                for cmd in repo["commands"]["on_environment_update"]:
                    os.environ.update(utils.read_environment())
                    cmd = cmd.replace("$REPO_PATH", repo["path"])
                    print("Executing: " + cmd)
                    subprocess.call(cmd, shell=True, cwd=repo["path"],
                                    **options)

    # Execute launch commands.
    for repo in repositories:
        if "commands" in repo.keys():
            for cmd in repo["commands"]["on_launch"]:
                os.environ.update(utils.read_environment())
                cmd = cmd.replace("$REPO_PATH", repo["path"])
                print("Executing: " + cmd)
                subprocess.call(cmd, shell=True, cwd=repo["path"], **options)
def main():
    # Get environment path
    environment_path = utils.get_environment()

    # If no environment is defined, put user in root environment.
    if not environment_path:
        msg = "\n\nCould not find the \"environment.conf\" file in \"{path}\"."
        msg += "\nPlease create an environment pointer file and save it as "
        msg += "\"{path}/environment.conf\"."
        msg += "\nYou can also modify the included example "
        msg += "\"{path}/environment.conf.example\", and rename to "
        msg += "\"{path}/environment.conf\"."
        msg += "\n\nYou are in the root environment of Conda. "
        msg += "The \"conda\" command is available to use now."
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        path = path.replace("\\", "/")
        print(msg.format(path=path))
        return

    # If requested to put user into the root environment.
    if environment_path == "root":
        msg = "You are in the root environment of Conda. "
        msg += "The \"conda\" command is available to use now."
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        path = path.replace("\\", "/")
        print(msg.format(path=path))
        return

    # Add conda_git_deployment module to environment.
    # Also removing PYTHONPATH that conda root environment needs.
    path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    os.environ["PYTHONPATH"] = path

    # Get environment data.
    environment_string = ""
    if os.path.exists(environment_path):
        f = open(environment_path, "r")
        environment_string = f.read()
        f.close()
    else:
        msg = "Could not find \"{0}\" on disk."
        print(msg.format(environment_path))

    if not environment_string:
        environment_string = requests.get(environment_path).text

    environment_data = utils.read_yaml(environment_string)

    # Export environment
    if (utils.get_arguments()["export"] or
            utils.get_arguments()["export-without-commit"]):
        repositories_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "repositories",
                         environment_data["name"]))

        # Get commit hash and name from repositories on disk.
        if not utils.check_executable("git"):
            subprocess.call(["conda", "install", "-c", "anaconda", "git",
                             "-y"])
        disk_repos = {}
        for repo in os.listdir(repositories_path):
            path = os.path.join(repositories_path, repo)
            if not os.path.exists(os.path.join(path, ".git")):
                continue

            commit_hash = subprocess.check_output(
                ["git", "rev-parse", "HEAD"], cwd=path).decode().rsplit()[0]

            disk_repos[repo] = commit_hash

        # Construct new git dependencies.
        git_data = {"git": []}
        for item in environment_data["dependencies"]:
            if "git" in item:
                for repo in item["git"]:
                    # Get url from environment file.
                    url = ""
                    if isinstance(repo, str):
                        url = repo
                    if isinstance(repo, dict):
                        url = list(repo.keys())[0]

                    # Skip any repositories that aren't cloned yet.
                    name = url.split("/")[-1].replace(".git", "").split("@")[0]
                    if name not in disk_repos.keys():
                        continue

                    # Construct commit url if requested.
                    commit_url = url.split("@")[0]
                    if not utils.get_arguments()["export-without-commit"]:
                        commit_url += "@" + disk_repos[name]

                    if isinstance(repo, str):
                        git_data["git"].append(commit_url)
                    if isinstance(repo, dict):
                        git_data["git"].append({commit_url: repo[url]})

        # Replace git dependencies
        for item in environment_data["dependencies"]:
            if "git" in item:
                environment_data["dependencies"].remove(item)
        environment_data["dependencies"].append(git_data)

        # Write environment file
        utils.write_yaml(environment_data,
                         os.path.join(os.getcwd(), "environment.yml"))

        return

    # Writing original environment to disk
    data_file = os.path.join(tempfile.gettempdir(),
                             "data_%s.yml" % os.getpid())
    utils.write_yaml(environment_data, data_file)

    # Remove git from environment as it's not supported by conda (yet).
    for item in environment_data["dependencies"]:
        if "git" in item:
            index = environment_data["dependencies"].index(item)
            del environment_data["dependencies"][index]

    # Create environment file from passed environment.
    environment_filename = os.path.join(tempfile.gettempdir(),
                                        "env_%s.yml" % os.getpid())

    utils.write_yaml(environment_data, environment_filename)

    args = ["conda", "env", "create"]

    # Force environment update/rebuild when requested by command.
    if utils.get_arguments()["update-environment"]:
        args.append("--force")

    # Check whether the environment installed is different from the requested
    # environment, and whether the conda-git-deployment is different.
    # Force environment update/rebuild if different.
    environment_update = False
    if not utils.get_arguments()["suppress-environment-update"]:
        incoming_md5 = hashlib.md5(
            (environment_string + "cwd: {0}".format(os.getcwd())).encode()
        ).hexdigest()
        existing_md5 = ""

        md5_path = os.path.join(os.path.expanduser("~"), "AppData", "Local",
                                "Continuum", "Miniconda2",
                                environment_data["name"] + ".md5")
        if os.path.exists(md5_path):
            f = open(md5_path, "r")
            existing_md5 = f.read()
            f.close()

        if incoming_md5 != existing_md5:
            environment_update = True
            if "--force" not in args:
                args.append("--force")

        with open(md5_path, "w") as the_file:
            the_file.write(incoming_md5)

    # Create environment
    args.extend(["-f", environment_filename])

    return_code = subprocess.call(args)

    os.remove(environment_filename)

    # Spawning a new process to get the correct python executable and
    # passing data via file on disk.
    platform_script = "environment.sh"
    if platform.system().lower() == "windows":
        platform_script = "environment.bat"

    args = [
        os.path.join(os.path.dirname(__file__), platform_script),
        environment_data["name"],
        os.path.join(os.path.dirname(__file__), "install.py"),
        data_file
    ]
    args.extend(sys.argv[1:])

    # If it's the first installation, we need to pass update to install.py
    if not return_code:
        args.append("--update-environment")

    if platform.system().lower() != "windows":
        args.insert(0, "bash")

    if environment_update and "--update-environment" not in args:
        args.append("--update-environment")

    subprocess.call(args)
    for i, sample_length in enumerate(lengths):
        inputs[i, lengths[i]:, ] = 0
        targets[i, lengths[i]:, ] = 0
    return lengths


def evaluate(params, model):
    # evaluate on more bits than training to ensure generalization
    test_loader = DataLoader(
        XORDataset(num_sequences=5000, num_bits=int(params.max_bits * 1.5)),
        batch_size=500)

    is_correct = np.array([])
    for inputs, targets in test_loader:
        lengths = adjust_lengths(params.vary_lengths, inputs, targets)
        inputs = inputs.to(params.device)
        targets = targets.to(params.device)
        with torch.no_grad():
            logits, predictions = model(inputs, lengths)
            is_correct = np.append(is_correct,
                                   ((predictions > 0.5) == (targets > 0.5)))

    accuracy = is_correct.mean()
    return accuracy


if __name__ == '__main__':
    params = get_arguments(ModelParams)
    train(params)
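# Here get_arguments receives a parameter *class* rather than a parser,
# which suggests the CLI is generated from typed fields. A minimal sketch of
# that pattern with dataclasses; the field names and the helper body are
# assumptions for illustration only.
import argparse
import dataclasses
from dataclasses import dataclass

@dataclass
class ModelParams:
    # Hypothetical fields; the real ModelParams is defined elsewhere.
    max_bits: int = 16
    vary_lengths: bool = False
    device: str = 'cpu'

def get_arguments(params_cls):
    # Build one CLI flag per dataclass field from its type and default,
    # then return a populated params instance.
    parser = argparse.ArgumentParser()
    for field in dataclasses.fields(params_cls):
        if field.type is bool:
            parser.add_argument('--' + field.name, action='store_true',
                                default=field.default)
        else:
            parser.add_argument('--' + field.name, type=field.type,
                                default=field.default)
    return params_cls(**vars(parser.parse_args()))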
    gc.collect()

    # save models
    model_path = '../../models/models_{0:%m%d_%H%M%S}_{1:.5f}_{2}.pkl'.format(
        start_dt, scores_cv_te['auc'], model_name)
    with open(model_path, 'wb') as f:
        pickle.dump(trained_models, f)


if __name__ == '__main__':
    # set logger
    start_dt = dt.now()
    logger = get_logger(__name__, start_dt)
    logger.debug('start train_model.py')

    # get config
    args = get_arguments()
    config = json.load(open('../../configs/{}'.format(args.config)))
    logger.debug(config)

    # get debug flag
    _debug = args.debug

    # load model
    models = {'random_forest': train_rf, 'logistic_regression': train_lr}
    train_and_predict = models[config['model_name']]

    train_model(config, _debug, logger, start_dt, train_and_predict)
    logger.debug('finish train_model.py')
"136.2{subnet_id}.51.0/24" , "137.2{subnet_id}.51.0/24" , "138.2{subnet_id}.51.0/24" ], "app_list" : [ ''' cfj_template_part5 = ''' ] }} ''' def add_arguments_cb(arg_parser): pass if __name__ == '__main__': CmdArgs = utils.get_arguments("mixed cipher test", add_arguments_cb) cfg_j_pre = '''{ "zones" : [ ''' cfg_j_post = ''' ] }''' cfg_j = '' cfj_template_part2 = '' cfj_template_part4 = '' for i in range(len(supported_ciphers)): if i: