import os

# Options, get_inspect_model, test, compute_entropy, the data module and the
# global attentions_register come from the surrounding project.
def main():
    experiment_name = "Resnet_DY_05"
    epoch = 100

    experiment_path = os.path.join("experiments", experiment_name)
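    # suppress_parse=True is assumed to skip command-line parsing so the
    # configuration can be loaded from the experiment's YAML file instead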
    opt = Options(suppress_parse=True)
    opt.load_from_file(os.path.join(experiment_path, "config.yaml"))
    opt.checkpoint_path = os.path.join(experiment_path,
                                       f"{experiment_name}_{epoch}.pth")
    assert opt.use_dynamic

    model = get_inspect_model(opt)

    test_dl = data.create_data_loader(opt, "test")
    test_score = test(model, opt.temperature[1], test_dl, opt.device)

    entropy = compute_entropy(attentions_register)

    print(test_score)
    print(entropy)
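
# Assumed entry point (not part of the original snippet):
if __name__ == "__main__":
    main()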
Example #2
def start():
    # very good Mean Square Error: 2.433159948965294e-11
    # options = Options(max_iter=1000, learning_rate=0.1, max_polynomial_degree=8,
    #                   divisions_quantity=7, train_percents=0.8)
    options = Options(max_iter=1000, learning_rate=0.1, max_polynomial_degree=8,
                      divisions_quantity=7, train_percents=0.75,
                      alpha_iterations=13 ** 5, alpha_learning_rate=0.01)
    files_handler = FilesHandler()
    set_points = files_handler.get_points_from_file(files_handler.get_set_file_from_options())
    test_points = files_handler.get_test_points_from_stdin()

    apply_calculations(options, set_points, test_points)
    return 0
Example #3
def parse_args(argv):
    """
    Parses command line arguments form an options object
    :param argv:
    :return:
    """
    parser = ArgumentParser(prog="stock-correlated-news-harvester")
    parser.add_argument('--stock_histories_file_path', metavar='Stock File Path',
                        type=str, required=True)
    parser.add_argument('--stock_symbol_mapping_file_path', metavar='Stock Symbol Mapping File Path',
                        type=str, required=True)
    parser.add_argument('--twitter_config_file_path', metavar='Twitter Credentials Config File',
                        type=str, required=True)
    parser.add_argument('--results_path', metavar='Results Path',
                        type=str, required=True)

    return parser.parse_args(argv, namespace=Options())
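
# A minimal usage sketch; Options() here simply serves as the namespace object
# that receives the parsed attributes:
if __name__ == '__main__':
    import sys
    opts = parse_args(sys.argv[1:])
    print(opts.results_path)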
Example #4
# Assumed imports for the bare names used below; parse and dump are taken to
# be ast.parse and ast.dump. The pipeline classes (PreProcessor, Transpiler,
# ...) and log come from the surrounding project.
from sys import argv
from os import getcwd, makedirs
from os.path import join, dirname, exists
from ast import parse, dump

def main():
	fpath = argv[1]
	with open(fpath, 'r') as f:
		data = f.read()
	
	auto_process_libs = [
		'math',
		'os.path',
	]

	auto_import_libs = [
		*auto_process_libs,
		'os',
	]

	options = Options(
		debug=True,
		imports=auto_import_libs,
		eval_mod=auto_process_libs,
	)

	data = PreProcessor(options=options).preprocess(data)
	data = Transpiler(options=options).transpile(data)
	tree = parse(data)
	tree = Generator(options=options).generate(tree)
	tree = Optimizer(options=options).optimize(tree)
	tree = Inliner(options=options).inline(tree)
	tree = Importer(options=options).clean_imports(tree)
	tree = UnusedRemover(options=options).remove_unused(tree)
	code = Unparser.unparse(tree)
	code = Minifier(options=options).minify(code)

	log(dump(tree))
	log(code.replace('\n', '\\n\n'))

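	# derive dist/<name>.py from the input path; the [1:] assumes fpath has a
	# leading "./"-style prefix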
	path_parts = fpath.split('.')
	out_path = join(getcwd(), 'dist',
					''.join(path_parts[:-1])[1:] + '.py')
	out_dir = dirname(out_path)

	# makedirs also creates intermediate directories, which plain mkdir would
	# not when the source file lives in a subdirectory
	if not exists(out_dir):
		makedirs(out_dir)

	with open(out_path, 'w') as f:
		f.write(code)
Example #5
import numpy as np
import os
import torch
import torch.nn as nn
import torch.multiprocessing as mp

from utils.options import Options
from utils.factory import GlobalLogsDict, ActorLogsDict, LearnerLogsDict, EvaluatorLogsDict
from utils.factory import LoggersDict, ActorsDict, LearnersDict, EvaluatorsDict, TestersDict
from utils.factory import EnvsDict, MemoriesDict, ModelsDict

if __name__ == '__main__':
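    # CUDA tensors cannot be shared across fork()ed workers, so force the
    # "spawn" start method before any subprocess is created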
    mp.set_start_method("spawn", force=True)

    opt = Options()
    torch.manual_seed(opt.seed)

    env_prototype = EnvsDict[opt.env_type]
    memory_prototype = MemoriesDict[opt.memory_type]
    model_prototype = ModelsDict[opt.model_type]

    # dummy env to get state/action/reward/gamma/terminal_shape & action_space
    dummy_env = env_prototype(opt.env_params, 0)
    opt.state_shape = dummy_env.state_shape
    opt.action_shape = dummy_env.action_shape
    opt.action_space = dummy_env.action_space
    opt.reward_shape = opt.agent_params.num_tasks
    opt.gamma_shape = opt.agent_params.num_tasks
    opt.terminal_shape = opt.agent_params.num_tasks
    del dummy_env
Example #6
from data.robotic_contour_dataloader import get_train_dataloader, get_val_dataloader
from utils.options import Options
from data.utils.prepare_data import get_split

from train_engine_contour import TrainEngine

if __name__ == '__main__':
    opt = Options().opt
    train_files, test_files = get_split(opt.fold)
    train_dataloader = get_train_dataloader(train_files, opt)
    val_dataloader = get_val_dataloader(test_files, opt)
    engine = TrainEngine(opt)
    engine.set_data(train_dataloader, val_dataloader)
    engine.train_model()

Example #7
                                                   num_workers=opt.workers,
                                                   pin_memory=True)
    return train_dataloader


def get_val_dataloader(file_list, opt):
    data_transform = Compose([
        PadIfNeeded(
            min_height=opt.val_crop_height, min_width=opt.val_crop_width, p=1),
        CenterCrop(height=opt.val_crop_height, width=opt.val_crop_width, p=1),
        Normalize(p=1)
    ], p=1)
    val_dataset = RoboticsDataset(file_names=file_list,
                                  transform=data_transform,
                                  problem_type=opt.problem_type)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=False,
                                                 num_workers=opt.workers,
                                                 pin_memory=True)
    return val_dataloader


if __name__ == '__main__':
    # Test code for dataloader

    options = Options()
    train_files, test_files = prepare_data.get_split(0)
    get_train_dataloader(train_files, options.opt)
Example #8
from torchvision import transforms
from dataset.dataloader import ISICKerasDataset
from model.pix2pix import Pix2Pix
from torch.utils.data import DataLoader

transformations = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.ToTensor()
])
eval_transformations = transforms.Compose([transforms.ToTensor()])

if __name__ == "__main__":
    set_logger('train.log')
    # prepare params for training
    opt = Options()
    args = opt.opts()
    args = opt.params(args)

    # Create datasetloader
    train_dataset = ISICKerasDataset(args.dataset_dir,
                                     data_type='train',
                                     transform=transformations)
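    # num_workers=0 keeps data loading in the main process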
    train_dataset_loader = DataLoader(train_dataset,
                                      shuffle=True,
                                      batch_size=args.batch_size,
                                      num_workers=0)

    # train_img = train_dataset[0]

    eval_dataset = ISICKerasDataset(args.dataset_dir,
Example #9
#!/usr/bin/env python3

# ------------------------------------------------------------------------------
#
# Author:
#   Armin Hasitzka ([email protected])
#
# Licensed under the MIT license.
#   See LICENSE in the project root for license information.
#
# ------------------------------------------------------------------------------

from enhancitizer import Enhancitizer
from utils.options import Options

if __name__ == '__main__':
    Enhancitizer(Options().collect()).run()
Example #10
import os
import time

import tensorflow as tf

from utils.options import Options
from data.dataset import Dataset
from models.cyclegan import CycleGANModel

tf.enable_eager_execution()
"""
Run this module for training.
Required args: --data_dir, --save_dir
"""
if __name__ == "__main__":
    opt = Options().parse(training=True)
    dataset = Dataset(opt)
    model = CycleGANModel(opt)

    device = ("/gpu:" + str(opt.gpu_id)) if opt.gpu_id != -1 else "/cpu:0"

    with tf.device(device):
        global_step = model.global_step
        batches_per_epoch = dataset.get_batches_per_epoch(opt)
        # Initialize Tensorboard summary writer:
        log_dir = os.path.join(opt.save_dir, 'tensorboard')
        summary_writer = tf.contrib.summary.create_file_writer(log_dir, flush_millis=10000)
        for epoch in range(1, opt.epochs):
            start = time.time()
            for train_step in range(batches_per_epoch):
                # Record summaries every 100 train_steps; we multiply by 3
                # because there are 3 gradient updates per step.
Example #11
# The Controller size (in hidden units)
help = 'The number of hidden units in the controller network'
parser.add_argument('--controller_size',
                    action='store',
                    dest='controller_size',
                    type=int,
                    help=help)

# The Learning Rate
help = 'The learning rate to use (in the form xe-y)'
parser.add_argument('--lr', action='store', dest='lr', type=float, help=help)

args = parser.parse_args()
# 1. setting up
opt = Options(**vars(args))  # unpack the arguments
np.random.seed(opt.seed)

# 2. env     (prototype)
env_prototype = EnvDict[opt.env_type]
# 3. circuit (prototype)
circuit_prototype = CircuitDict[opt.circuit_type]
# 4. agent
agent = AgentDict[opt.agent_type](opt.agent_params,
                                  env_prototype=env_prototype,
                                  circuit_prototype=circuit_prototype)
if args.gpu:
    circuit_prototype = torch.nn.DataParallel(circuit_prototype).cuda()

# 6. fit model
if opt.mode == 1:  # train
Example #12
import os
import time

import tensorflow as tf

from utils.options import Options
from data.dataset import Dataset
from models.cyclegan import CycleGANModel

tf.enable_eager_execution()
"""
Run this module for testing.
Required args: --data_dir, --save_dir, --results_dir
"""
if __name__ == "__main__":
    opt = Options().parse(training=False)
    dataset = Dataset(opt)
    model = CycleGANModel(opt)

    device = ("/gpu:" + str(opt.gpu_id)) if opt.gpu_id != -1 else "/cpu:0"

    with tf.device(device):
        start = time.time()
        for image_index in range(opt.num_test):
            model.set_input(dataset.data)
            test_images = model.test()
            dataset.save_images(test_images, image_index)
        print("Generating {} test images for both datasets finished in {} sec\n"
              .format(opt.num_test, time.time() - start))
Example #13
import numpy as np
import os
import torch
import torch.nn as nn
import torch.multiprocessing as mp

from utils.options import Options
from utils.factory import GlobalLogsDict, ActorLogsDict, LearnerLogsDict, EvaluatorLogsDict
from utils.factory import LoggersDict, ActorsDict, LearnersDict, EvaluatorsDict, TestersDict
from utils.factory import EnvsDict, MemoriesDict, ModelsDict

if __name__ == '__main__':
    mp.set_start_method("spawn", force=True)

    opt = Options()
    torch.manual_seed(opt.seed)

    env_prototype = EnvsDict[opt.env_type]
    memory_prototype = MemoriesDict[opt.memory_type]
    model_prototype = ModelsDict[opt.model_type]

    # dummy env to get state/action/reward/gamma/terminal_shape & action_space
    dummy_env = env_prototype(opt.env_params, 0)
    opt.norm_val = dummy_env.norm_val # use the max val of env states to normalize model inputs
    opt.state_shape = dummy_env.state_shape
    opt.action_shape = dummy_env.action_shape
    opt.action_space = dummy_env.action_space
    opt.reward_shape = opt.agent_params.num_tasks
    opt.gamma_shape = opt.agent_params.num_tasks
    opt.terminal_shape = opt.agent_params.num_tasks
    del dummy_env
Example #14
    train_dl = data.create_data_loader(opt, "train")
    test_dl = data.create_data_loader(opt, "test")
    criterion = getattr(common, opt.criterion)(*opt.criterion_args)
    temperature = TemperatureScheduler(*opt.temperature)
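    # resolve the optimizer and LR-scheduler classes by name from the config,
    # e.g. opt.optimizer = "Adam" with opt.optimizer_args = [1e-3] (illustrative values)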
    optimizer = getattr(torch.optim, opt.optimizer)(model.parameters(), *opt.optimizer_args)
    scheduler = getattr(torch.optim.lr_scheduler, opt.scheduler)(optimizer, *opt.scheduler_args)
    device = torch.device(opt.device)
    model, epoch, optimizer, scheduler = load_checkpoint(opt.checkpoint_path, model, optimizer, scheduler, device)
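    # load_checkpoint returns the last completed epoch, so training resumes at epoch + 1 below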

    if opt.is_classification:
        test_metric = test_accuracy
        metric_name = 'Accuracy'
    else: # segmentation
        test_metric = partial(test_segmentation, n_classes=opt.n_classes)
        metric_name = 'mIoU'

    print('Setting up complete, starting training')
    for ep in range(epoch + 1, opt.max_epoch + 1):
        train_epoch(ep, model, criterion, temperature, optimizer, train_dl, device, writer, batch_average=opt.batch_average)
        test_score = test_metric(model, temperature.get(ep), test_dl, device)
        writer.add_scalar(f"{metric_name}/test", test_score, ep * len(test_dl.dataset))
        print(f"Test {metric_name} after {ep} epochs = {test_score}")
        scheduler.step()
        if ep % opt.save_freq == 0:
            save_checkpoint(model, optimizer, scheduler, ep, opt)


if __name__ == '__main__':
    options = Options(config_file_arg="config_path")
    main(options)
Example #15
File: test.py Project: LiheYoung/U-RISC
    with torch.no_grad():
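        # slide a split_size x split_size window over the image; the last
        # row/column of patches may be smaller than split_size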
        for i in range(h // split_size + 1):
            for j in range(w // split_size + 1):
                endi, endj = (i + 1) * split_size, (j + 1) * split_size
                patch = image[:, :, i * split_size: endi, j * split_size: endj]
                patch = patch.contiguous()
                predicted = model.module.tta_eval(patch)
                predicted = predicted * 255.0
                mask = predicted.cpu().squeeze().detach().numpy()
                mask = np.round(mask).astype(np.uint8)
                result[i * split_size: endi, j * split_size: endj] = mask
    return result


if __name__ == "__main__":
    args = Options().parse()
    if "simple" in str.lower(args.dataset):
        dataset = "simple"
    else:
        assert "complex" in str.lower(args.dataset)
        dataset = "complex"

    transform = transforms.Compose([
            transforms.ToTensor()
    ])

    testset = URISC(path=args.dataset, mode="test", transform=transform)
    testloader = DataLoader(testset, batch_size=1, shuffle=False)
    if args.model == "CASENet":
        model = DataParallel(CASENet(backbone=args.backbone)).cuda()
    elif args.model == "ResNetUNet":
Example #16
import tensorflow as tf
import numpy as np
from env.preon_env import Preon_env
from utils.options import Options
import tflearn
import sys
import os

from networks import ActorNetwork, CriticNetwork
from replay_buffer import ReplayBuffer

# ==========================
#   Training Parameters
# ==========================

opt = Options()

# Max training episodes
MAX_EPISODES = 50000

# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = opt.agent_params.actor_lr
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = opt.agent_params.critic_lr
# Discount factor
GAMMA = opt.agent_params.gamma
# Soft target update param
TAU = opt.agent_params.tau

# ===========================
#   Utility Parameters
Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    Main entry point.
    Starts device detection with the notification service.
"""

from notification.telegram_service import TelegramService
from utils.options import Options
from device_detection.device_detector import DeviceDetector

if __name__ == "__main__":
    options = Options.get_cli_options()

    telegram_service = TelegramService()

    detection = DeviceDetector(
        device_address=options.device,
        minimum_absence_for_notification=options.absence,
        device_scan_interval=options.scan_interval,
        notification_service=telegram_service)
    detection.run()