Example #1
0
def move_logs(from_path, to_path):
	"""Move archived log files from `from_path` into `to_path`.

	Files are expected under a date-partitioned hierarchy (the glob
	matches five levels, presumably YYYY/MM/DD/HH/<file> -- TODO confirm
	against the writer side).  The subtree for *today* is left in place
	so files that may still be written are not moved.

	Args:
		from_path: source root; must end with a path separator so the
			glob pattern and prefix strip line up.
		to_path: destination root; must end with a path separator.
	"""
	now = datetime.datetime.now()
	# Today's partition, e.g. "2024/01/31" -- compared against the first
	# three path components of each candidate file below.
	exempt_filepath = now.strftime("%Y/%m/%d")

	G = glob.glob(from_path + '*/*/*/*/*')

	for g in G:
		# Strip the root prefix with a slice.  The previous
		# g.split(from_path)[1] was buggy: split() also cuts at any
		# *later* occurrence of from_path inside the matched path and
		# silently truncates the relative file path.
		filepath = g[len(from_path):]

		if '/'.join(filepath.split('/')[:3]) != exempt_filepath:
			util.build_dirs(to_path, filepath)

			shutil.move(g, to_path + filepath)
Example #2
0
def main():
    """Parse command-line options and run the WDL proxy test suite."""
    global test_locked

    parser = argparse.ArgumentParser(description="Run WDL proxy tests")
    parser.add_argument("--do-not-build",
                        help="Do not assemble the dxWDL jar file",
                        action="store_true",
                        default=False)
    parser.add_argument("--project",
                        help="DNAnexus project ID",
                        default="dxWDL_playground")
    parser.add_argument("--verbose",
                        help="Verbose compilation",
                        action="store_true",
                        default=False)
    opts = parser.parse_args()

    print("top_dir={} test_dir={}".format(top_dir, test_dir))

    version = util.get_version_id(top_dir)
    proj = util.get_project(opts.project)
    if proj is None:
        raise RuntimeError("Could not find project {}".format(opts.project))
    folder = util.build_dirs(proj, version)
    print("project: {} ({})".format(proj.name, proj.get_id()))
    print("folder: {}".format(folder))

    test_dict = {"aws:us-east-1": proj.name + ":" + folder}

    # build the dxWDL jar file, only on us-east-1
    #if not opts.do_not_build:
    #    util.build(proj, folder, version, top_dir, test_dict)

    # For some reason, the ip-tables state is not completely cleared
    # between runs, so the networking tests must go first.
    test_networking(proj, folder, version)
    test_authorization(proj, folder, version)
Example #3
0
def _make_arg_parser():
    """Build the command-line parser for the WDL compiler test runner."""
    argparser = argparse.ArgumentParser(
        description="Run WDL compiler tests on the platform")
    argparser.add_argument("--archive",
                           help="Archive old applets",
                           action="store_true",
                           default=False)
    argparser.add_argument("--compile-only",
                           help="Only compile the workflows, don't run them",
                           action="store_true",
                           default=False)
    argparser.add_argument("--compile-mode", help="Compilation mode")
    argparser.add_argument("--debug",
                           help="Run applets with debug-hold, and allow ssh",
                           action="store_true",
                           default=False)
    argparser.add_argument("--delay-workspace-destruction",
                           help="Run applets with delayWorkspaceDestruction",
                           action="store_true",
                           default=False)
    argparser.add_argument("--force",
                           help="Remove old versions of applets and workflows",
                           action="store_true",
                           default=False)
    argparser.add_argument(
        "--folder", help="Use an existing folder, instead of building dxWDL")
    argparser.add_argument("--lazy",
                           help="Only compile workflows that are unbuilt",
                           action="store_true",
                           default=False)
    argparser.add_argument("--list",
                           "--test-list",
                           help="Print a list of available tests",
                           action="store_true",
                           dest="test_list",
                           default=False)
    argparser.add_argument("--locked",
                           help="Generate locked-down workflows",
                           action="store_true",
                           default=False)
    argparser.add_argument("--project",
                           help="DNAnexus project ID",
                           default="dxWDL_playground")
    argparser.add_argument(
        "--project-wide-reuse",
        help="look for existing applets in the entire project",
        action="store_true",
        default=False)
    argparser.add_argument("--stream-all-files",
                           help="Stream all input files with dxfs2",
                           action="store_true",
                           default=False)
    argparser.add_argument(
        "--runtime-debug-level",
        help="printing verbosity of task/workflow runner, {0,1,2}")
    argparser.add_argument("--test",
                           help="Run a test, or a subgroup of tests",
                           action="append",
                           default=[])
    argparser.add_argument("--unlocked",
                           help="Generate only unlocked workflows",
                           action="store_true",
                           default=False)
    argparser.add_argument("--verbose",
                           help="Verbose compilation",
                           action="store_true",
                           default=False)
    argparser.add_argument("--verbose-key",
                           help="Verbose compilation",
                           action="append",
                           default=[])
    return argparser


def main():
    """Compile the selected WDL tests to the platform and run them.

    Builds (or reuses) a project folder, compiles each chosen test into
    dx:workflows/dx:applets, then runs them unless --compile-only.
    """
    global test_unlocked
    args = _make_arg_parser().parse_args()

    print("top_dir={} test_dir={}".format(top_dir, test_dir))

    register_all_tests(args.verbose)
    if args.test_list:
        print_test_list()
        exit(0)
    test_names = []
    if len(args.test) == 0:
        # Default suite.  Use a one-element list, not the bare string
        # 'M': the loop below iterates args.test, and iterating a string
        # only worked by accident for a single-letter suite name.
        args.test = ['M']
    for t in args.test:
        test_names += choose_tests(t)
    print("Running tests {}".format(test_names))
    version_id = util.get_version_id(top_dir)

    project = util.get_project(args.project)
    if project is None:
        raise RuntimeError("Could not find project {}".format(args.project))
    if args.folder is None:
        base_folder = util.build_dirs(project, version_id)
    else:
        # Use existing prebuilt folder
        base_folder = args.folder
    applet_folder = base_folder + "/applets"
    test_folder = base_folder + "/test"
    print("project: {} ({})".format(project.name, project.get_id()))
    print("folder: {}".format(base_folder))

    test_dict = {"aws:us-east-1": project.name + ":" + base_folder}

    # build the dxWDL jar file, only on us-east-1
    util.build(project, base_folder, version_id, top_dir, test_dict)

    if args.unlocked:
        # Disable all locked workflows
        args.locked = False
        test_unlocked = test_names

    # Translate the parsed options into dxWDL compiler flags.
    compiler_flags = []
    if args.locked:
        compiler_flags.append("-locked")
        test_unlocked = []
    if args.archive:
        compiler_flags.append("-archive")
    if args.compile_mode:
        compiler_flags += ["-compileMode", args.compile_mode]
    if args.force:
        compiler_flags.append("-force")
    if args.verbose:
        compiler_flags.append("-verbose")
    if args.stream_all_files:
        compiler_flags.append("-streamAllFiles")
    if args.verbose_key:
        for key in args.verbose_key:
            compiler_flags += ["-verboseKey", key]
    if args.runtime_debug_level:
        compiler_flags += ["-runtimeDebugLevel", args.runtime_debug_level]
    if args.project_wide_reuse:
        compiler_flags.append("-projectWideReuse")

    #  is "native" included in one of the test names?
    if ("call_native" in test_names or "call_native_v1" in test_names):
        native_call_setup(project, applet_folder, version_id, args.verbose)
    if "call_native_app" in test_names:
        native_call_app_setup(project, version_id, args.verbose)

    try:
        # Compile the WDL files to dx:workflows and dx:applets
        runnable = compile_tests_to_project(project, test_names, applet_folder,
                                            compiler_flags, version_id,
                                            args.lazy)
        if not args.compile_only:
            run_test_subset(project, runnable, test_folder, args.debug,
                            args.delay_workspace_destruction)
    finally:
        print("Completed running tasks in {}".format(args.project))
Example #4
0
import datetime
import os

import util

base_path = '/home/pi/audiolog/'

# Timestamped destination relative to base_path,
# e.g. 2024/01/31/13/aud-20240131-130501.wav
stamp = datetime.datetime.now()
filename = stamp.strftime("%Y/%m/%d/%H/aud-%Y%m%d-%H%M%S.wav")

# Make sure the YYYY/MM/DD/HH directory chain exists before recording.
util.build_dirs(base_path, filename)

# Record ten seconds of 16-bit little-endian audio into the new file.
os.system("arecord -f S16_LE -d 10 " + base_path + filename)
Example #5
0
                    type=str,
                    choices=['center', 'random'],
                    help='Location of the noise')
parser.add_argument('--poison_rate', default=1.0, type=float)
parser.add_argument('--perturb_tensor_filepath', default=None, type=str)
args = parser.parse_args()

# Set up Experiments
if args.exp_name == '':
    # BUG FIX: 'exp_' + datetime.datetime.now() raised a TypeError
    # (cannot concatenate str and datetime).  Format the timestamp into
    # a filesystem-safe string instead.
    args.exp_name = 'exp_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')

exp_path = os.path.join(args.exp_name, args.version)
# NOTE(review): the log file reuses args.version as its basename, giving
# <exp_name>/<version>/<version>.log -- presumably intentional; confirm.
log_file_path = os.path.join(exp_path, args.version)
checkpoint_path = os.path.join(exp_path, 'checkpoints')
checkpoint_path_file = os.path.join(checkpoint_path, args.version)
util.build_dirs(exp_path)
util.build_dirs(checkpoint_path)
logger = util.setup_logger(name=args.version, log_file=log_file_path + ".log")

# CUDA Options
logger.info("PyTorch Version: %s" % (torch.__version__))
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    device_list = [
        torch.cuda.get_device_name(i)
        for i in range(0, torch.cuda.device_count())
    ]
    logger.info("GPU List: %s" % (device_list))
Example #6
0
            mean_acc = sum(running_sum)/len(running_sum)
            std = statistics.stdev(running_sum) if len(running_sum) > 1 else 0
            exp_results[loss_name][noise_rate]['avg_last_acc'] = mean_acc
            exp_results[loss_name][noise_rate]['std_last_acc'] = std


def load_results(dataset_name, noise_rate_type, exp_names):
    """Aggregate one dataset's results across experiments into a JSON file.

    Collects per-experiment metrics via load_dataset_results, computes
    the averaged table, and writes everything to
    results/<dataset>_<noise_type>_exp_results.json.
    """
    results = collections.defaultdict(dict)
    out_name = '%s_%s_exp_results.json' % (dataset_name, noise_rate_type)
    for exp in exp_names:
        # os.path.join with a single argument returns it unchanged, so
        # the experiment directory joins straight onto the dataset name.
        dataset_dir = os.path.join(exp, dataset_name)
        load_dataset_results(exp_results=results,
                             dataset_target=dataset_dir,
                             exp_name=exp,
                             noise_rate_type=noise_rate_type)
    process_avg_table(results)
    with open(os.path.join('results', out_name), 'w') as outfile:
        json.dump(results, outfile)


if __name__ == '__main__':
    util.build_dirs('results')
    runs = ['run1', 'run2', 'run3']
    # One results file per (dataset, noise-rate type) pair, aggregated
    # over the three runs.
    for ds_name in ['mnist', 'cifar10', 'cifar100']:
        for nr_type in ['sym', 'asym']:
            load_results(ds_name, nr_type, runs)