コード例 #1
0
def main():
    """Entry point: train an expert RL agent (no learned model) with identity
    observation transforms on a single train/eval environment pair."""
    args = parse_args(should_use_model=False, should_use_rl=True)

    # Force raw-observation pass-through and one environment per split.
    args.encoder_type = "identity"
    args.decoder_type = "identity"
    args.num_train_envs = 1
    args.num_eval_envs = 1

    # bootstrap_expert wires up environments, storage, video/logging helpers
    # and the agent in one call; unpack everything it hands back.
    (
        train_envs,
        eval_envs,
        episode_step_limit,
        _video_dir,
        model_dir,
        buffer_dir,
        video,
        _device,
        replay_buffer,
        agent,
        logger,
    ) = bootstrap_expert(args=args)

    train_agent(
        args=args,
        vec_train_envs=train_envs,
        vec_eval_envs=eval_envs,
        L=logger,
        agent=agent,
        video=video,
        model_dir=model_dir,
        train_replay_buffer=replay_buffer,
        eval_replay_buffer=None,  # expert training keeps no separate eval buffer
        buffer_dir=buffer_dir,
        max_episode_steps=episode_step_limit,
    )
コード例 #2
0
def initialize():
    """Parse CLI arguments, seed all RNG sources, validate/broadcast the
    learning-rate list against the lambda list, and prepare the output dir.

    Returns:
        (args, use_cuda, output_dir): parsed args with lambda/lr pairs sorted
        by lambda, whether CUDA should be used, and an existing results dir.
    """
    args = parse_args()

    # Seed every RNG source we rely on, for reproducibility.
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed,
                    np.random.seed, random.seed):
        seed_fn(args.seed)

    # benchmark=True trades strict determinism for faster cudnn kernels.
    torch.backends.cudnn.benchmark = True
    #torch.backends.cudnn.deterministic = True

    n_rates = len(args.learning_rates)
    n_lambdas = len(args.lambda_values)
    assert n_rates in (1, n_lambdas), \
        'Learning rates list should be length 1 or equal to lambda list length'
    if n_rates == 1:
        # Broadcast the single learning rate across every lambda value.
        args.learning_rates *= n_lambdas

    # Sort the (lambda, lr) pairs jointly so downstream code can assume
    # ascending lambda order.
    pairs = sorted(zip(args.lambda_values, args.learning_rates))
    args.lambda_values, args.learning_rates = zip(*pairs)

    assert 0.0 <= min(args.lambda_values) and max(args.lambda_values) <= 1.0, \
        'Lambda values outside of [0, 1]'
    use_cuda = torch.cuda.is_available() and not args.cpu

    output_dir = os.path.join('results', args.output_directory, str(args.seed))
    os.makedirs(output_dir, exist_ok=True)

    # Persist the full argument set alongside the results for later inspection.
    with open(os.path.join(output_dir, 'args.json'), 'w') as f:
        json.dump(vars(args), f, indent=2)

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    return args, use_cuda, output_dir
コード例 #3
0
def main():
    """Load a pretrained expert agent and roll it out to collect paired
    (state, pixel) transitions into the train and eval replay buffers,
    saving both buffers under args.save_buffer_path.

    Fix: the number of samples to collect was hard-coded to 50000 while the
    final report printed args.num_samples — the two now agree by using
    args.num_samples for both collection calls.
    """
    args = parse_args(should_use_model=True, should_use_rl=True)

    args.load_model = True
    args.return_both_pixel_and_state = True
    args.change_angle = True
    args.num_train_envs = 2
    args.num_eval_envs = 1

    (
        logbook,
        device,
        vec_train_envs,
        vec_eval_envs,
        state_space_obs,
        pixel_space_obs,
        action_size,
        train_replay_buffer,
        eval_replay_buffer,
        logging_dict,
        max_episode_steps,
    ) = bootstrap_envs_and_buffer(args)

    # The expert acts on raw states, so no learned encoder/decoder is needed.
    args.encoder_type = "identity"
    args.decoder_type = "identity"
    (video_dir, model_dir, buffer_dir, video, device, agent, L,) = bootstrap_agent(
        args, state_space_obs.shape, action_size, device
    )

    # The checkpoint path encodes the step as a "<dir>_<step>" suffix; this
    # deliberately overrides the model_dir returned by bootstrap_agent.
    model_dir, step = args.load_model_path.rsplit("_", 1)
    agent.load(
        model_dir=model_dir, step=step,
    )

    start = time()
    # Collect data across environments.

    train_replay_buffer = utils.collect_both_state_and_obs_using_policy_vec(
        vec_env=vec_train_envs,
        num_samples=args.num_samples,
        replay_buffer=train_replay_buffer,
        policy=agent,
        save_video=args.save_video,
    )

    train_replay_buffer.save(f"{args.save_buffer_path}/train")

    eval_replay_buffer = utils.collect_both_state_and_obs_using_policy_vec(
        vec_env=vec_eval_envs,
        num_samples=args.num_samples,
        replay_buffer=eval_replay_buffer,
        policy=agent,
        save_video=False,  # never record video for the eval rollouts
    )

    eval_replay_buffer.save(f"{args.save_buffer_path}/eval")

    end = time()
    print(f" Time to collect {args.num_samples} datapoints = {end - start}")
コード例 #4
0
def main():
    """Parse release arguments and tag every repository with the new version."""
    ret, root_path, old_version, new_version, token = argument_parser.parse_args()

    # Bail out if argument parsing reported failure.
    # Fix: use truthiness instead of '== False' (PEP 8 E712).
    if not ret:
        return

    tag_repos(root_path, new_version)
コード例 #5
0
def main():
    """Run the interactive Group Finance command loop until 'exit' is typed."""
    print(
        "Welcome to Group Finance. \n"
        "This client is continuously listening for commands. Input a command and press Enter.\n"
        "If you do not know the commands, check out README.md")

    while True:
        command = input()
        if command == 'exit':
            print("bye")
            break
        try:
            # Hand the whitespace-split tokens to the argument parser.
            parse_args(command.split(" "))
        except ArgumentParserError as parse_error:
            # Surface parser errors to the user without killing the loop.
            print(parse_error)
        print("")
コード例 #6
0
def main():
    """Parse release arguments and run the full release flow."""
    ret, root_path, old_version, new_version, token = argument_parser.parse_args(
    )

    # Bail out if argument parsing reported failure.
    # Fix: use truthiness instead of '== False' (PEP 8 E712).
    if not ret:
        return

    release(root_path, old_version, new_version, token)
コード例 #7
0
ファイル: update_version.py プロジェクト: bettis007/bitprim
def main():
    """Bump every project's version from old_version to new_version."""
    ret, root_path, old_version, new_version, token = argument_parser.parse_args()

    # Bail out if argument parsing reported failure.
    # Fix: use truthiness instead of '== False' (PEP 8 E712).
    if not ret:
        return

    for project in projects:
        # update_version may change the cwd, so reset it for every project.
        os.chdir(root_path)
        # old/new versions are (major, minor, patch) triples.
        update_version(root_path, project,
                       old_version[0], old_version[1], old_version[2],
                       new_version[0], new_version[1], new_version[2])
コード例 #8
0
def main():
    """Split raw tuberculosis images into Train/Valid/Test folder trees.

    Reads the source/target directories from the command line, samples the
    image names into the three splits, and copies each split's images into
    its own folder structure.
    """
    # Receive the input arguments from command line.
    args = argument_parser.parse_args()

    # Root for the processed file tree and location of the raw images.
    target_root = args.path_target
    image_origin = args.path_source

    # Build and create the folder structure for every split (order matters
    # only in that it mirrors the original Train -> Valid -> Test sequence).
    split_paths = {
        step: define_paths(path_base=target_root, step=step)
        for step in ("Train", "Valid", "Test")
    }
    for paths in split_paths.values():
        create_folders(paths)

    sick_healthy_dict = get_image_titles(image_origin)

    # Image names: dict keys converted to a list so we can sample from it.
    remaining = list(sick_healthy_dict.keys())

    # Fractions for training and validation; the remainder is for testing.
    train_fraction = (1 / 2)
    valid_fraction = (1 / 4)

    # Both absolute counts are computed against the *full* dataset size,
    # before any instances are drawn out of it.
    n_train = calculate_number_of_instances(remaining, train_fraction)
    n_valid = calculate_number_of_instances(remaining, valid_fraction)

    # Draw the training and validation samples; whatever is left is the test set.
    training_data, remaining = get_random_instances(remaining, n_train)
    validation_data, remaining = get_random_instances(remaining, n_valid)
    testing_data = remaining

    # Label lists for each split (kept to preserve the original bookkeeping).
    training_labels_list = create_labels_list(training_data, sick_healthy_dict)
    validating_labels_list = create_labels_list(validation_data, sick_healthy_dict)
    testing_labels_list = create_labels_list(testing_data, sick_healthy_dict)

    # Copy the images of each split into its folder tree.
    for data, step, done_message in (
        (training_data, "Train", "Done training images"),
        (validation_data, "Valid", "Done validating images"),
        (testing_data, "Test", "Done testing images"),
    ):
        write_images_to_folder(data, sick_healthy_dict, split_paths[step], image_origin)
        print(done_message)
コード例 #9
0
import pandas as pd
from argument_parser import parse_args
import report_functions

# parse command line args
# Expected keys (inferred from usage below): "name", "monitoring", "rvtools".
args = parse_args()

# Build a comparison report between the monitooring export and the RVTools
# export, writing every sheet into a single Excel workbook under res/.
with pd.ExcelWriter("res/" + "Report confronto " + args["name"] + ".xlsx") as excel_writer:
    # import files to compare: monitoring (status sheet) and rvtools (network)
    monitoring_file = pd.read_excel(args["monitoring"], sheet_name="Status")
    rvtools_file = pd.read_excel(
            args["rvtools"],
            sheet_name="vNetwork",
            na_values="unknown")
    # Keep only rows that have an IP address and are powered on.
    # NOTE(review): the boolean mask is built from the pre-dropna frame and
    # applied to the dropna'd frame — this relies on pandas index alignment
    # and can warn/raise on newer pandas; verify against the pandas version
    # in use.
    rvtools_file = rvtools_file.dropna(subset=["IP Address"])[rvtools_file["Powerstate"] == "poweredOn"]

    # pick each vm with its ip address in rvtools
    # The "IP Address" cell may hold a comma-separated list; the first entry
    # containing a '.' (IPv4-looking) is taken for each VM.
    rvtools_name_ip = dict(zip(list(rvtools_file["VM"]), [list(filter(
        lambda single_ip: ("." in single_ip), ip_list.replace(" ", "").split(",")))[0]
                                                            for ip_list in list(rvtools_file["IP Address"])]))

    # pick each vm with its ip address in monitoring
    monitoring_name_ip = dict(zip(list(monitoring_file["Hostname"]), list(monitoring_file["IP"])))

    # write not monitored vms onto the report
    report_functions.write_not_monitored_vms(
        rvtools_name2ip=rvtools_name_ip,
        monitoring_name2ip=monitoring_name_ip,
        excel_writer=excel_writer)

    # write vm in monitoring but not in rvtools
コード例 #10
0
ファイル: train.py プロジェクト: parth-collab/icp-block-mdp
    # Build the models and their optimizers; the logbook handles metric logging.
    models = bootstrap_models_and_optimizers(
        args=args,
        obs_shape=obs_shape,
        action_size=action_size,
        device=device,
        logbook=logbook,
    )

    # Train loop
    train_model(
        args=args,
        logbook=logbook,
        device=device,
        train_envs=vec_train_envs,
        eval_envs=vec_eval_envs,
        obs_shape=obs_shape,
        action_size=action_size,
        models=models,
        train_replay_buffer=train_replay_buffer,
        eval_replay_buffer=eval_replay_buffer,
        logging_dict=logging_dict,
        num_iters=args.num_iters,
    )


if __name__ == "__main__":
    # Model-only training entry point: parse args without the RL components.
    args = argument_parser.parse_args(should_use_model=True,
                                      should_use_rl=False)
    main(args)
コード例 #11
0
ファイル: gelpia.py プロジェクト: soarlab/gelpia
def main():
    """Parse arguments, build the rust 'cooperative' solver invocation, run it
    while streaming/translating its output, and clean up generated artifacts.

    Side effects: writes a generated rust source file, extends PATH and
    LD_LIBRARY_PATH (plus RUST_BACKTRACE in debug mode), optionally writes a
    progress log file, and prints timing summaries.

    Fixes: regex made a raw string (invalid '\\[' escape), the bare 'except:'
    clauses around os.remove narrowed to OSError, and the timers are
    pre-initialized so the final timing report cannot raise NameError when
    setup is interrupted before the solver starts.
    """
    setup_requirements(base_dir)

    parsing_start = time.time()
    arg_dict = ap.parse_args()

    # Add to paths used during runtime for our rust libs
    append_to_environ("PATH", bin_dir)
    rust_ld_lib_addition = path.join(base_dir, ".compiled")
    if arg_dict["debug"]:
        rust_ld_lib_addition += ":" + path.join(base_dir,
                                                "src/func/target/debug/")
        rust_ld_lib_addition += ":" + path.join(base_dir, "target/debug/deps")
        # Set debug mode in case of a crash
        append_to_environ("RUST_BACKTRACE", "1")
    else:
        rust_ld_lib_addition += ":" + path.join(base_dir,
                                                "src/func/target/release/")
        rust_ld_lib_addition += ":" + path.join(base_dir,
                                                "target/release/deps")
    append_to_environ("LD_LIBRARY_PATH", rust_ld_lib_addition)

    # Grab input interval variables, use them for the function translation,
    # and write them out to a rust file
    # start_box, dimensions, variables = parse_box2(arg_dict["input"])
    # import function_to_rust
    # tup = function_to_rust.translate(arg_dict["function"], variables)
    # (function, constants, part) = tup

    inputs = [tup[1] for tup in arg_dict['inputs']]
    inputs = "|".join(inputs)

    # Hash of the rust function text names the generated artifacts so runs
    # with different functions cannot collide.
    file_id = mk_file_hash(arg_dict["rust_function"])
    function_filename = path.join(
        src_dir, "func/src/lib_generated_{}.rs".format(file_id))

    if arg_dict["debug"]:
        executable = path.join(base_dir, 'target/debug/cooperative')
    else:
        executable = path.join(base_dir, 'target/release/cooperative')

    # Log output
    executable_args = [
        '-c',
        arg_dict["constants"],
        '-f',
        arg_dict["interp_function"],
        '-i',
        inputs,
        "-x",
        str(arg_dict["input_epsilon"]),
        "-y",
        str(arg_dict["output_epsilon"]),
        "-S",
        "generated_" + file_id,  # Function file suffix
        "-n",
        ",".join(b[0] for b in arg_dict["inputs"]),
        "-t",
        str(arg_dict["timeout"]),
        "-u",
        str(arg_dict["update"]),
        "-d" if arg_dict["debug"] else "",  # If a debug run
        "-L" if arg_dict["logfile"] else ""
    ]

    iu.log(1, iu.cyan("Interpreted: ") + arg_dict["interp_function"])
    iu.log(1, iu.cyan("Rust: ") + arg_dict["rust_function"])
    iu.log(1, iu.cyan("Domain: ") + inputs)
    iu.log(
        1,
        iu.cyan("Variables: ") + ", ".join(b[0] for b in arg_dict["inputs"]))
    iu.log(1, iu.cyan("Command: ") + ' '.join([executable] + executable_args))

    parsing_end = time.time()

    # Use try so that we can catch control-c easily
    output = None
    logging = bool(arg_dict["logfile"])
    # logfile may be a bool flag or an explicit path string.
    log_file = arg_dict["logfile"] if type(
        arg_dict["logfile"]) is str else None

    # Pre-initialize the timers: the summary at the end must not NameError if
    # setup fails (e.g. ctrl-C) before 'start' is assigned inside the try.
    start = end = parsing_end

    try:
        with open(function_filename, 'w') as f:
            f.write(arg_dict["rust_function"])

        if log_file:
            # Truncate any stale log left over from a previous run.
            with open(log_file, 'w') as f2:
                f2.write("")
                f2.flush()

        start = time.time()
        term_time = None
        if arg_dict["timeout"] != 0:
            # Hard-kill deadline: twice the solver timeout, or an explicit
            # grace period on top of the start time.
            if arg_dict["grace"] == 0:
                term_time = start + arg_dict["timeout"] * 2
            else:
                term_time = start + arg_dict["grace"]

        iu.log(1, iu.cyan("Running"))
        for line in iu.run_async(executable, executable_args, term_time):
            if logging and line.startswith("lb:"):  # Hacky
                # Lower-bound progress lines go to stderr (and the log file)
                # so they do not pollute the final answer on stdout.
                print(line.strip(), file=sys.stderr)
                if log_file:
                    with open(log_file, 'a') as f2:
                        f2.write(line.strip())
                        f2.write('\n')
                        f2.flush()
            else:
                if arg_dict['dreal']:
                    # dreal mode negates the first interval endpoint before
                    # printing (raw string fixes the '\[' escape warning).
                    match = re.search(r"\[([^,]+),(.*)", line)
                    if match:
                        line = "[{},{}".format(-float(match.group(1)),
                                               match.group(2))
                print(line.strip())
    except KeyboardInterrupt:
        iu.warning("Caught ctrl-C, exiting now")
    finally:
        # Best-effort cleanup of generated artifacts; debug mode keeps them
        # for inspection. os.remove failures (file never produced, already
        # gone) are deliberately ignored — narrowed from bare 'except:' so
        # KeyboardInterrupt etc. are no longer swallowed.
        if not arg_dict["debug"]:
            os.remove(function_filename)
            try:
                os.remove(
                    path.join(base_dir, ".compiled/libfunc_generated_" +
                              file_id + ".so"))
            except OSError:
                pass

            try:
                p = path.join(
                    src_dir,
                    "func/target/release/libfunc_generated_" + file_id + ".so")
                os.remove(p)
            except OSError:
                pass

            try:
                p = path.join(
                    src_dir,
                    "func/target/release/func_generated_" + file_id + ".d")
                os.remove(p)
            except OSError:
                pass
        end = time.time()

    # 'output' is never assigned elsewhere in this block; kept for
    # compatibility with the original control flow.
    if output:
        print(output)

    iu.log(0, iu.green("Parsing time: ") + str(parsing_end - parsing_start))
    iu.log(0, iu.green("Solver time: ") + str(end - start))
コード例 #12
0
import argument_parser
from pprint import pprint

# Parse CLI args at import time so they can steer the setup below
# (e.g. sync_bn) before the heavy framework imports run.
args = argument_parser.parse_args()
pprint(vars(args))

import os

# Pin CUDA device numbering and toolkit location before torch is imported.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_HOME"] = "/nfs/xs/local/cuda-10.2"

# Multi-GPU training requires synchronized batch norm.
if len(args.gpu_ids) > 1:
    args.sync_bn = True

from torch.utils.tensorboard import SummaryWriter
from datasets.build_datasets import build_datasets

from model.bisenet import BiSeNet

from utils.saver import Saver
from utils.trainer_kd import Trainer
from utils.misc import AccCaches, get_curtime, print_model_parm_nums, load_state_dict
import numpy as np


def main():
    # dataset
    trainset, valset, testset = build_datasets(args.dataset, args.base_size,
                                               args.crop_size)

    # 定义 student/teacher 模型
コード例 #13
0
# /usr/bin/env python
# NOTE(review): this shebang is missing the leading '!' ('#!/usr/bin/env
# python') and will not make the script directly executable — confirm intent.

# TODO(John): Correct styling according to Google's Style Guide
# TODO(John): Make this script importable (add a main method)
# TODO(John): Figure out how to end script the correct way

from subprocess import call
import os
import time
from helpers import cd
from argument_parser import parse_args

# parse and store arguments
# NOTE(review): parsed at import time, so this module cannot be imported
# without valid CLI arguments (see the TODO about adding a main method).
arguments = parse_args()

def download_sra():
	# check if SRA directory exists, if not create it
	if not os.path.exists(arguments.dir + '/SRA'):
		print(time.strftime('[%a, %I:%M]'), 'Creating directory /SRA..')
		os.makedirs(arguments.dir + '/SRA')
	# move to newly created directory, download SRA files
	with cd(arguments.dir + '/SRA'):
		print(time.strftime("[%a, %I:%M]"), "Downloading SRA file: " + SRA\
		 + " to " + arguments.dir + '/SRA')
		# call fastq-dump using the SRA accension.
		if arguments.paired:
			call(["fastq-dump", "--split-files", SRA, "-gzip", ">",\
			"../log/fastq_dump_log_{}.txt".\
			format(time.strftime("[%a:%I:%M]"))])
		else:
			call(["fastq-dump", SRA, "-gzip", ">",\
コード例 #14
0
def main():
    """Active-learning loop: repeatedly train a DeepLab model on the currently
    labeled subset, then ask the active selector for the next batch to label,
    for args.max_iterations rounds.

    Side effects: reads the LMDB dataset, writes checkpoints, selections,
    confusion matrices and tensorboard summaries under constants.RUNS.
    """
    args = argument_parser.parse_args()
    print(args)
    torch.manual_seed(args.seed)
    # Non-deterministic but faster cudnn kernel autotuning.
    torch.backends.cudnn.benchmark = True
    # hardcoding scannet

    # get handle to lmdb dataset
    lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, args.dataset, "dataset.lmdb"), args.memory_hog)

    # create train val and test sets; training starts from the seed set.
    train_set = get_active_dataset(args.active_selection_mode)(args.dataset, lmdb_handle, args.superpixel_dir, args.base_size, 'seedset_0')
    val_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'val')
    test_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'test')

    class_weights = None
    if args.use_balanced_weights:
        # Class weights are computed over the full 'train' split, not the seed set.
        class_weights = calculate_weights_labels(get_active_dataset(args.active_selection_mode)(args.dataset, lmdb_handle, args.superpixel_dir, args.base_size, 'train'))

    saver = Saver(args)
    saver.save_experiment_config()
    summary = TensorboardSummary(saver.experiment_dir)
    writer = summary.create_summary()

    # get active selection method
    active_selector = get_active_selector(args, lmdb_handle, train_set)


    # for each active selection iteration
    for selection_iter in range(args.max_iterations):

        # The labeled-data percentage names this iteration's run directory.
        fraction_of_data_labeled = int(round(train_set.get_fraction_of_labeled_data() * 100))

        if os.path.exists(os.path.join(constants.RUNS, args.dataset, args.checkname, f'runs_{fraction_of_data_labeled:03d}', "selections")):
            # resume: load selections if this is a rerun, and selections are available from a previous run
            train_set.load_selections(os.path.join(constants.RUNS, args.dataset, args.checkname, f'runs_{fraction_of_data_labeled:03d}', "selections"))
        elif os.path.exists(os.path.join(constants.RUNS, args.dataset, args.checkname, f'runs_{fraction_of_data_labeled:03d}', "selections.txt")):
            # resume: load selections if this is a rerun, and selections are available from a previous run
            train_set.load_selections(os.path.join(constants.RUNS, args.dataset, args.checkname, f'runs_{fraction_of_data_labeled:03d}', "selections.txt"))
        else:
            # active selection iteration

            train_set.make_dataset_multiple_of_batchsize(args.batch_size)
            # create model from scratch
            # MC-dropout is enabled only for the selection modes that need it.
            model = DeepLab(num_classes=train_set.num_classes, backbone=args.backbone, output_stride=args.out_stride, sync_bn=args.sync_bn,
                            mc_dropout=((args.active_selection_mode.startswith('viewmc')) or(args.active_selection_mode.startswith('vote')) or args.view_entropy_mode == 'mc_dropout'))
            model = model.cuda()
            # create trainer
            trainer = Trainer(args, model, train_set, val_set, test_set, class_weights, Saver(args, suffix=f'runs_{fraction_of_data_labeled:03d}'))

            # train for args.epochs epochs
            lr_scheduler = trainer.lr_scheduler
            for epoch in range(args.epochs):
                trainer.training(epoch)
                # Validate on the last epoch of every eval_interval window.
                if epoch % args.eval_interval == (args.eval_interval - 1):
                    trainer.validation(epoch)
                if lr_scheduler:
                    lr_scheduler.step()

            # Undo the batch-size padding before evaluation and selection.
            train_set.reset_dataset()
            epoch = trainer.load_best_checkpoint()

            # get best val miou / metrics
            _, best_mIoU, best_mIoU_20, best_Acc, best_Acc_class, best_FWIoU = trainer.validation(epoch, test=True)

            trainer.evaluator.dump_matrix(os.path.join(trainer.saver.experiment_dir, "confusion_matrix.npy"))

            # Metrics are plotted against the labeled-data percentage, not epochs.
            writer.add_scalar('active_loop/mIoU', best_mIoU, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/mIoU_20', best_mIoU_20, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/Acc', best_Acc, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/Acc_class', best_Acc_class, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/fwIoU', best_FWIoU, train_set.get_fraction_of_labeled_data() * 100)

            # make active selection
            active_selector.select_next_batch(model, train_set, args.active_selection_size)
            # save selections
            trainer.saver.save_active_selections(train_set.get_selections(), args.active_selection_mode.endswith("_region"))
            trainer.train_writer.close()
            trainer.val_writer.close()

        print(selection_iter, " / Train-set length: ", len(train_set))

    writer.close()
コード例 #15
0
ファイル: train.py プロジェクト: zhouleiSJTU/ViewAL
def main():
    """Train a DeepLab model on the full (100%) train split, optionally
    resuming from a checkpoint, then evaluate the best checkpoint on the
    test split and log its metrics to tensorboard."""

    # script for training a model using 100% train set

    args = argument_parser.parse_args()
    print(args)
    torch.manual_seed(args.seed)

    lmdb_handle = dataset_base.LMDBHandle(
        os.path.join(constants.HDD_DATASET_ROOT, args.dataset, "dataset.lmdb"),
        args.memory_hog)
    train_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size,
                             'train')
    val_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'val')
    test_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'test')
    # Pad the train set so every batch is full.
    train_set.make_dataset_multiple_of_batchsize(args.batch_size)

    model = DeepLab(num_classes=train_set.num_classes,
                    backbone=args.backbone,
                    output_stride=args.out_stride,
                    sync_bn=args.sync_bn)
    model = model.cuda()

    class_weights = None
    if args.use_balanced_weights:
        # Weight the loss by class frequency over the train split.
        class_weights = calculate_weights_labels(train_set)

    saver = Saver(args)
    trainer = Trainer(args, model, train_set, val_set, test_set, class_weights,
                      Saver(args))
    summary = TensorboardSummary(saver.experiment_dir)
    writer = summary.create_summary()

    start_epoch = 0
    if args.resume:
        # Restore model/optimizer state and best metric from a previous run.
        args.resume = os.path.join(constants.RUNS, args.dataset, args.resume,
                                   'checkpoint.pth.tar')
        if not os.path.isfile(args.resume):
            raise RuntimeError(f"=> no checkpoint found at {args.resume}")
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        trainer.model.load_state_dict(checkpoint['state_dict'])
        trainer.optimizer.load_state_dict(checkpoint['optimizer'])
        trainer.best_pred = checkpoint['best_pred']
        print(
            f'=> loaded checkpoint {args.resume} (epoch {checkpoint["epoch"]})'
        )

    lr_scheduler = trainer.lr_scheduler

    for epoch in range(start_epoch, args.epochs):
        trainer.training(epoch)
        # Validate on the last epoch of every eval_interval window.
        if epoch % args.eval_interval == (args.eval_interval - 1):
            trainer.validation(epoch)
        if lr_scheduler:
            lr_scheduler.step()

    # Evaluate the best checkpoint on the test split.
    epoch = trainer.load_best_checkpoint()
    _, best_mIoU, best_mIoU_20, best_Acc, best_Acc_class, best_FWIoU = trainer.validation(
        epoch, test=True)

    writer.add_scalar('test/mIoU', best_mIoU, epoch)
    writer.add_scalar('test/mIoU_20', best_mIoU_20, epoch)
    writer.add_scalar('test/Acc', best_Acc, epoch)
    writer.add_scalar('test/Acc_class', best_Acc_class, epoch)
    writer.add_scalar('test/fwIoU', best_FWIoU, epoch)

    trainer.train_writer.close()
    trainer.val_writer.close()