def main():
    """CLI entry point: count HTML tags in the given files or apply a tag map.

    Exits with a non-zero status on errors or when no action flag is given.
    """
    # Local import so this fix does not depend on the file-level import block.
    import json
    parser = create_parser()
    args = parser.parse_args()
    try:
        files = get_files(args)
    except Exception as e:
        print("Ошибка: " + str(e), file=sys.stderr)
        exit(-1)
    if args.count or args.count_total:
        check_files(files, args.recursive)
        tags = count_tags(files)
        # Accumulate either a JSON-serializable dict or a plain-text report.
        result = {} if args.json_output else ''
        if args.count_total:
            if args.json_output:
                result['total'] = len(tags)
            else:
                result += 'Различных тегов: %i\n' % len(tags)
        if args.count:
            if args.json_output:
                result['tags'] = tags
            else:
                # Most frequent tags first.
                for tag, count in sorted(tags.items(), key=lambda x: x[1],
                                         reverse=True):
                    result += '%s: %i\n' % (tag, count)
        if args.json_output:
            # BUG FIX: the old code stringified the dict and substituted ' with ",
            # which produces invalid JSON whenever a value contains a quote and
            # leaves Python literals (True/False/None) intact. json.dumps emits
            # valid JSON directly; ensure_ascii=False keeps Cyrillic readable.
            print(json.dumps(result, ensure_ascii=False))
        else:
            print(result)
    elif args.map:
        try:
            tagmap = read_tagmap(args.map)
        except Exception as e:
            print("Ошибка: " + str(e), file=sys.stderr)
            exit(-1)
        check_files(files, args.recursive)
        replace_tags(files, tagmap)
    else:
        print('Укажите действие')
        exit(-1)
def blame (filename, args):
    # Produce blame information for a single repository file and stream the
    # parsed result to an output device.
    #   filename -- path of the file, relative to `uri`
    #   args     -- packed (repo, uri, out, opts) tuple
    repo, uri, out, opts = args
    filename = filename.strip (' \n')
    # Directories (trailing '/') cannot be blamed -- skip them silently.
    if filename[-1] == '/':
        return
    # Build a parser matching the repository type; it writes to `out`.
    p = create_parser (repo.get_type (), filename)
    p.set_output_device (out)
    def feed (line, p):
        # Forward each raw line of blame output to the parser.
        p.feed (line)
    # Register the feed callback as a consumer of BLAME command output.
    wid = repo.add_watch (BLAME, feed, p)
    try:
        # NOTE(review): `mc` is disabled in --fast mode; exact semantics of the
        # flag are defined by the repository backend -- confirm there.
        repo.blame (os.path.join (uri, filename), rev = opts.revision, mc = not opts.fast)
    except RepositoryCommandError, e:  # Python 2 'except' syntax
        printerr ("Error getting blame information of path '%s': %s", (filename, e.error))
def blame(filename, args):
    # Produce blame information for a single repository file and stream the
    # parsed result to an output device. This variant additionally asks the
    # backend to ignore whitespace-only changes.
    #   filename -- path of the file, relative to `uri`
    #   args     -- packed (repo, uri, out, opts) tuple
    repo, uri, out, opts = args
    filename = filename.strip(' \n')
    # Directories (trailing '/') cannot be blamed -- skip them silently.
    if filename[-1] == '/':
        return
    # Build a parser matching the repository type; it writes to `out`.
    p = create_parser(repo.get_type(), filename)
    p.set_output_device(out)
    def feed(line, p):
        # Forward each raw line of blame output to the parser.
        p.feed(line)
    # Register the feed callback as a consumer of BLAME command output.
    wid = repo.add_watch(BLAME, feed, p)
    try:
        # NOTE(review): `mc` is disabled in --fast mode; exact semantics of the
        # flag are defined by the repository backend -- confirm there.
        repo.blame(os.path.join(uri, filename), rev=opts.revision,
                   mc=not opts.fast, ignore_whitespaces=True)
    except RepositoryCommandError, e:  # Python 2 'except' syntax
        printerr("Error getting blame information of path '%s': %s", (filename, e.error))
def main():
    """Set up seeding and distributed options, then launch training worker(s)."""
    # NOTE(review): create_parser() appears to return parsed args directly here
    # (attributes are read from the result) -- confirm against its definition.
    args = create_parser()
    if args.seed is not None:
        # Seed every RNG and force deterministic cuDNN for reproducibility.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    args.num_cluster = args.num_cluster.split(',')
    # BUG FIX: the old exists()+mkdir() pair is racy (another process can create
    # the directory between the check and the call) and mkdir() fails when
    # intermediate directories are missing. makedirs(exist_ok=True) handles both.
    os.makedirs(args.exp_dir, exist_ok=True)
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main():
    """Parse the command line and invoke the sub-command handler bound to it."""
    cli_args = create_parser().parse_args()
    # Each sub-parser registers its handler via set_defaults(func=...).
    cli_args.func(cli_args)
printerr("Unknown error creating repository for path %s (%s)", (path, str(e))) return 1 uri = repo.get_uri_for_path(path) else: uri = uri.strip('/') repo = create_repository('svn', uri) # Check uri actually points to a valid svn repo if repo.get_last_revision(uri) is None: printerr("URI %s doesn't seem to point to a valid svn repository", (uri, )) return 1 # Check we have a parser for the given repo try: p = create_parser(repo.get_type(), 'foo') except ParserUnknownError: printerr("%s repositories are not supported by guilty (yet)", (repo.get_type(), )) return 1 except Exception, e: printerr("Unknown error creating parser for repository %s (%s)", (repo.get_uri(), str(e))) return 1 del p try: out = create_output_device(config.output) except OutputDeviceUnknownError: printerr("Output type %s is not supported by guilty", (config.output, ))
def main():
    # Calendoola CLI entry point: parse arguments, then dispatch on the
    # (target, command) pair to the matching `co.operation_*` handler.
    parser = create_parser()
    db = Calendoola()
    argcomplete.autocomplete(parser)
    namespace = parser.parse_args()
    # Make sure the 'current_user' config field exists before any lookup below.
    try:
        db.cfg.get_field('current_user')
    except configparser.NoOptionError:
        db.cfg.add_field('current_user')
    #######################################
    # User management commands work without an active login.
    if namespace.target == 'user':
        if namespace.command == 'add':
            co.operation_user_add(db, namespace.nickname, namespace.force)
        elif namespace.command == 'login':
            co.operation_user_login(namespace.nickname, db)
        elif namespace.command == 'logout':
            co.operation_user_logout(db)
        elif namespace.command == 'remove':
            co.operation_user_remove(db, namespace.nickname)
        elif namespace.command == 'info':
            co.operation_user_info(db)
        return
    #######################################
    # Everything below requires a signed-in user.
    try:
        db.cfg.get_field('current_user')
    except django_ex.ObjectDoesNotExist:
        print('You did not sign in. Please login')
        return
    #######################################
    if namespace.target == 'task':
        if namespace.command == 'add':
            co.operation_task_add(db, namespace.description, namespace.priority,
                                  namespace.deadline, namespace.tags,
                                  namespace.subtask)
        elif namespace.command == 'remove':
            co.operation_task_remove(db, namespace.id)
        elif namespace.command == 'show':
            co.operation_task_show(db, namespace.to_show, namespace.selected,
                                   namespace.colored)
        elif namespace.command == 'finish':
            co.operation_task_finish(db, namespace.id)
        elif namespace.command == 'move':
            co.operation_task_move(db, namespace.id_from, namespace.id_to)
        elif namespace.command == 'change':
            co.operation_task_change(db, namespace.id, namespace.info,
                                     namespace.deadline, namespace.priority,
                                     namespace.status, namespace.append_tags,
                                     namespace.remove_tags)
        elif namespace.command == 'share':
            co.operation_task_share(db, namespace.id_from, namespace.nickname_to)
        elif namespace.command == 'restore':
            co.operation_task_restore(db, namespace.id)
        elif namespace.command == 'unshare':
            co.operation_task_unshare(db, namespace.id)
    #######################################
    elif namespace.target == 'calendar':
        # Show the calendar for the current user over the given (month, year)
        # pair -- presumably; confirm the meaning of namespace.date elements.
        co.operation_calendar_show(
            db.get_tasks(db.cfg.get_field('current_user')),
            namespace.date[0],
            namespace.date[1])
    #######################################
    elif namespace.target == 'plan':
        if namespace.command == 'add':
            co.operation_plan_add(db, namespace.description,
                                  namespace.period_type, namespace.period_value,
                                  namespace.time)
        elif namespace.command == 'show':
            co.operation_plan_show(db, namespace.id, namespace.colored)
        elif namespace.command == 'remove':
            co.operation_plan_remove(db, namespace.id)
        elif namespace.command == 'change':
            co.operation_plan_change(db, namespace.id, namespace.info,
                                     namespace.period_type,
                                     namespace.period_value, namespace.time)
    elif namespace.target == 'reminder':
        if namespace.command == 'add':
            co.operation_reminder_add(db, namespace.remind_type,
                                      namespace.remind_value)
        elif namespace.command == 'remove':
            co.operation_reminder_remove(db, namespace.id)
        elif namespace.command == 'apply':
            co.operation_reminder_apply_task(db, namespace.reminder_id,
                                             namespace.task_id)
        elif namespace.command == 'detach':
            co.operation_reminder_detach_task(db, namespace.reminder_id,
                                              namespace.task_id)
        elif namespace.command == 'show':
            co.operation_reminder_show(db, namespace.id)
        elif namespace.command == 'change':
            co.operation_reminder_change(db, namespace.id, namespace.remind_type,
                                         namespace.remind_value)
    # NOTE(review): runs after every logged-in command. Indentation was
    # reconstructed from a collapsed source -- confirm this belongs at function
    # level rather than inside the last elif branch.
    co.check_instances(db)
import json, sys
from pathlib import Path

# I have to use `# type: ignore` so I don't get errors that these imports don't work.
from helpers import error_message, getDataFolder, check_if_use_saved_directory, check_for_duplicates, set_default_directory, default_directory_exists, str2bool, dir_exists  # type: ignore
from parser import argc_allowed, create_parser, usage  # type: ignore

# Expose the CLI usage string as the module docstring.
__doc__ = "Usage:\n" + usage

# Validate the raw argv count before argparse takes over.
argc_allowed()
args = create_parser().parse_args()

if args.command == 'add':
    # A missing directory argument falls back to the saved default directory.
    if args.directory == None and not default_directory_exists():
        error_message(
            'Cannot use default directory if default directory not set', 3)
    target = args.target
    directory = check_if_use_saved_directory(args.directory)
    check_for_duplicates(target)
    if not Path(directory).is_dir():  # Checks if directory not found
        # NOTE(review): assumes error_message returns here rather than exiting,
        # so the user can still be offered directory creation -- confirm.
        error_message("Directory not found (may be a file)", 1)
        # Keep prompting until str2bool can interpret the answer as a boolean.
        create_new = "not bool"
        while create_new == "not bool":
            create_new = str2bool(input(f"Create new directory {directory}: "))
        if create_new:
# Fix every RNG seed for reproducible evaluation runs.
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.set_default_tensor_type(torch.FloatTensor)

# Environment/version strings, presumably captured for logging -- confirm usage.
TORCH_VERSION = torch.__version__
TORCH_CUDA_VERSION = torch.version.cuda
CUDNN_VERSION = str(cudnn.version())
DEVICE_NAME = torch.cuda.get_device_name()

# cudnn.benchmark = True
# Deterministic cuDNN kernels complement the fixed seeds above.
cudnn.deterministic = True

# Pin execution to the second physical GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# NOTE(review): attributes such as result_dir are read from `parser` below, so
# create_parser() seems to return parsed args, not an ArgumentParser -- confirm.
parser = create_parser()

dataset_name = 'Cityscapes'
ckpt_name = 'Semantic_Segmentation_Cityscapes_27.pth.tar'

def evaluation(model, dataloader):
    # Evaluate `model` over `dataloader` against the Cityscapes label mapping.
    # (Function body continues beyond this view.)
    # result dir
    result_dir = parser.result_dir
    # label dir
    label_file = os.path.join(parser.list_dir, 'info.json')
    # label mapping
    with open(label_file, 'r') as fp:
        info = json.load(fp)
    label_map = {}
    label = info["label"]
from typecheck import TypeChecker
from irgen import IRGen
from ast import ASTError
from main import all_phases

# Pin runtime dependencies before the test driver starts.
pkg_resources.require('ply')
pkg_resources.require('llvmlite==0.21.*')
pkg_resources.require('termcolor')

tests_dir = 'test'
fennec_ext = '.fc'  # extension of Fennec source files used as test inputs
alignment = 60      # column at which test-result output is aligned

# Resolve the tests directory relative to this file's location.
root_path = os.path.dirname(os.path.abspath(__file__))
tests_path = root_path + '/' + tests_dir

parser = create_parser(debug=True)

class TestCase:
    # One compiler test: a source file plus the phase and expectation to check.
    default_timeout = 3.0  # in seconds, overwriteable with envvar TIMEOUT

    def __init__(self, path, phase, test_type, basename):
        self.path = path
        self.phase = phase
        self.test_type = test_type
        self.basename = basename
        # Captured error stream of the compiler invocation.
        self.ferr = StringIO()
        # Files produced by the test run, tracked for later cleanup.
        self.generated_files = []

    def run(self):
        # Execute the test. (Body continues beyond this view.)
        showpath = self.path + ':'
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets

from gumi.ops import *
from gumi import models
from gumi import model_utils
from gumi.pruning import mask_utils
import cifar_utils
from parser import create_parser

# Parse CLI arguments at import time; the GPU selection below must happen
# before any CUDA context is created.
parser = create_parser(prog='Generate an optimised group configuration.')
args = parser.parse_args()

# CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
cudnn.benchmark = True

def factors(n):
    """ Copied from
    - https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
    """
    # Union of all divisor pairs of n. (Expression continues beyond this view.)
    return set(
        functools.reduce(
            list.__add__,
# Verbose logging for the whole export run.
logging.getLogger().setLevel(logging.DEBUG)

import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets

from parser import create_parser
from group_exporter import GroupExporter

# Parse CLI arguments at import time; the GPU selection below must happen
# before any CUDA context is created.
parser = create_parser(prog="Export from a mask based model to a grouped one.")
args = parser.parse_args()

# CUDA
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
use_cuda = torch.cuda.is_available()
cudnn.benchmark = True

def main():
    # Convert a mask-based model into an explicitly grouped one, driven by the
    # module-level parsed args.
    exporter = GroupExporter(args)
    exporter.export()

if __name__ == "__main__":
    main()
return 1 except Exception, e: printerr ("Unknown error creating repository for path %s (%s)", (path, str (e))) return 1 uri = repo.get_uri_for_path (path) else: uri = uri.strip ('/') repo = create_repository ('svn', uri) # Check uri actually points to a valid svn repo if repo.get_last_revision (uri) is None: printerr ("URI %s doesn't seem to point to a valid svn repository", (uri,)) return 1 # Check we have a parser for the given repo try: p = create_parser (repo.get_type (), 'foo') except ParserUnknownError: printerr ("%s repositories are not supported by guilty (yet)", (repo.get_type (),)) return 1 except Exception, e: printerr ("Unknown error creating parser for repository %s (%s)", (repo.get_uri (), str (e))) return 1 del p try: out = create_output_device (config.output) except OutputDeviceUnknownError: printerr ("Output type %s is not supported by guilty", (config.output,)) return 1 except OutputDeviceError, e: printerr (str(e))
def main():
    """CLI entry point: forward parsed arguments to trans_savnet_main."""
    namespace = create_parser().parse_args()
    trans_savnet_main(namespace)
def main():
    """CLI entry point: forward parsed arguments to savnet_main."""
    namespace = create_parser().parse_args()
    savnet_main(namespace)
def main():
    """CLI entry point: forward parsed arguments to run."""
    namespace = create_parser().parse_args()
    run(namespace)