# Assumed imports for this snippet; the helpers are project-local and their
# module paths are guesses inferred from usage, so they are left commented out:
import argparse
from pathlib import Path
from tqdm import tqdm
# from utils import load_yaml, init_seed, split_dataset, getattribute, accuracy_dice_score
# from learning import Learning


def main():
    parser = argparse.ArgumentParser(description='Semantic Segmentation')
    parser.add_argument('--train_cfg',
                        type=str,
                        default='./configs/train_config.yaml',
                        help='train config path')
    args = parser.parse_args()
    config_path = Path(args.train_cfg)
    config = load_yaml(config_path)
    init_seed(config['SEED'])

    df, train_ids, valid_ids = split_dataset(config['DATA_TRAIN'])
    train_dataset = getattribute(config=config,
                                 name_package='TRAIN_DATASET',
                                 df=df,
                                 img_ids=train_ids)
    valid_dataset = getattribute(config=config,
                                 name_package='VALID_DATASET',
                                 df=df,
                                 img_ids=valid_ids)
    train_dataloader = getattribute(config=config,
                                    name_package='TRAIN_DATALOADER',
                                    dataset=train_dataset)
    valid_dataloader = getattribute(config=config,
                                    name_package='VALID_DATALOADER',
                                    dataset=valid_dataset)
    model = getattribute(config=config, name_package='MODEL')
    criterion = getattribute(config=config, name_package='CRITERION')
    optimizer = getattribute(config=config,
                             name_package='OPTIMIZER',
                             params=model.parameters())
    scheduler = getattribute(config=config,
                             name_package='SCHEDULER',
                             optimizer=optimizer)
    device = config['DEVICE']
    metric_ftns = [accuracy_dice_score]
    num_epoch = config['NUM_EPOCH']
    gradient_clipping = config['GRADIENT_CLIPPING']
    gradient_accumulation_steps = config['GRADIENT_ACCUMULATION_STEPS']
    early_stopping = config['EARLY_STOPPING']
    validation_frequency = config['VALIDATION_FREQUENCY']
    saved_period = config['SAVED_PERIOD']
    checkpoint_dir = Path(config['CHECKPOINT_DIR'], type(model).__name__)
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    resume_path = config['RESUME_PATH']
    learning = Learning(model=model,
                        optimizer=optimizer,
                        criterion=criterion,
                        device=device,
                        metric_ftns=metric_ftns,
                        num_epoch=num_epoch,
                        scheduler=scheduler,
                        grad_clipping=gradient_clipping,
                        grad_accumulation_steps=gradient_accumulation_steps,
                        early_stopping=early_stopping,
                        validation_frequency=validation_frequency,
                        save_period=saved_period,
                        checkpoint_dir=checkpoint_dir,
                        resume_path=resume_path)
    learning.train(tqdm(train_dataloader), tqdm(valid_dataloader))
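The getattribute helper used in this and several later examples is not shown on this page; judging from the explicit importlib pattern in Example #11 (the ['PY'] / ['CLASS'] / ['ARGS'] lookups), it is presumably a config-driven factory along these lines. A minimal sketch assuming that config layout, not the actual implementation:

import importlib

def getattribute(config, name_package, **kwargs):
    # Assumed behavior: read the config section named by name_package,
    # import the module it names, and instantiate the named class with
    # the config's ARGS merged with any call-site keyword arguments.
    section = config[name_package]
    module = importlib.import_module(section['PY'])
    cls = getattr(module, section['CLASS'])
    return cls(**{**section.get('ARGS', {}), **kwargs})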
Example #2
    def __init__(self, total_episodes, time_training_step, time_testing_step):
        self.timestamp = datetime.now().strftime('%Y%m%d_%H_%M_%S')
        # Initialize a track instance
        self.Track = Track(self.timestamp)
        # Retrieve the track and its shape
        self.way = self.Track.retrieve_way()
        self.dims = self.way.shape
        # Initiate the Learning module using the dimensions of the track
        self.Learning = Learning(self.timestamp, self.dims)
        self.total_episodes = total_episodes
        # setup
        self.setup()
Example #3
    def __init__(self, pomdpfile='program.pomdp'):

        self.time = ['morning', 'afternoon', 'evening']
        self.location = ['classroom', 'library']
        self.identity = ['student', 'professor', 'visitor']
        self.intention = ['interested', 'not_interested']
        self.reason = Reason('reason0.plog')
        self.model = Model(filename=pomdpfile, parsing_print_flag=False)
        self.policy = Policy(5, 4, output='program.policy')
        self.instance = []
        self.results = {}
        self.learning = Learning('./', 'interposx.csv', 'interposy.csv')
        self.trajectory_label = 0
Example #4
    NUMBER_FRAMES = 75  # The number of frames per session; 4 sessions give 300 (75 * 4) frames per object.
    CATEGORY_COLUMN = 256
    INSTANCE_COLUMN = 257
    SESSION_COLUMN = 258
    DAY_COLUMN = 259
    CAMERA_COLUMN = 260
    IMAGE_NAME_COLUMN = 261
    DATA_DIMENSION = 256
    FACTOR_FRAMES = 2  # Every Nth frame will be selected; only 2 and 4 are reasonable values. The original
    # number of frames is 8, so it will be reduced to 4 or 2, respectively.

    # ------------------------------------ Initialization --------------------------------------------------------------

    rgwr = GammaGWR()
    utils = Utilities()
    learning = Learning()
    args = utils.parse_arguments()

    # Get data.
    original_data = utils.load_data(args.dataset).values
    original_data_normalized = utils.normalize_data(original_data,
                                                    DATA_DIMENSION)

    original_data_day_one = original_data_normalized[np.in1d(
        original_data_normalized[:, DAY_COLUMN], ONE_DAY)]
    original_data_left_camera = original_data_day_one[np.in1d(
        original_data_day_one[:, CAMERA_COLUMN], CAMERA)]
    selected_data = original_data_left_camera[np.in1d(
        original_data_left_camera[:, CATEGORY_COLUMN], CATEGORIES)]

    # Comment out if categorization rather than identification is to be used. For the rest of the evaluation the CATEGORY column
Example #5
                                audio = 5
                            else:
                                audio = 1
                            video = 0
                            camera = 0
                            checkagain = 1
                            counter_silence = 0
                ##############################################################
                elif info == 1:
                    data = ast.literal_eval(data.decode('utf-8'))
                    if menuvariable == 1:
                        machinelearningtext = data
                        conn.sendall(b"FinishLearning.endmes")
                    else:

                        learnpepper = Learning(data)
                        pas, law, saving, swerve = learnpepper.learn()

                        check = checklearning(pas, law, saving, swerve)

                        if check != 'ok':
                            mystring = "LearnMore.endmes" + check
                            string = mystring.encode('utf-8')
                            conn.sendall(string)
                            learn = 2
                            if interactionvariable == 1:
                                audio = 5
                            else:
                                audio = 1
                            info = 0
                        else:
Example #6
import numpy as np
from environment_explauto.environment import TestEnvironment
from learning import Learning

if __name__ == "__main__":

    print "Create environment"
    environment = TestEnvironment()

    print "Create agent"
    learning = Learning(dict(m_mins=environment.conf.m_mins,
                             m_maxs=environment.conf.m_maxs,
                             s_mins=environment.conf.s_mins,
                             s_maxs=environment.conf.s_maxs),
                        condition="AMB")
    learning.start()

    print()
    print("Do 100 autonomous steps:")
    for i in range(100):
        context = environment.get_current_context()
        m = learning.produce(context)
        s = environment.update(m)
        learning.perceive(s)

    print "Do 1 arm demonstration"
    m_demo_traj = np.zeros((25, 4)) + 0.001
    m_demo = environment.torsodemo2m(m_demo_traj)
    print "m_demo", m_demo
    s = environment.update(m_demo)
    learning.perceive(s, m_demo=m_demo)
Example #7
	filename = "supporter.cfg"
	config = configparser.ConfigParser()
	config.read(filename)

	jid = config.get("xmpp", "jid")
	password = config.get("xmpp", "password")
	room = config.get("xmpp", "room")
	nick = config.get("xmpp", "nick")

	logging.basicConfig(level=logging.INFO,
	                    format='%(levelname)-8s %(message)s')

	words = config.get("brain", "words")
	synonyms = config.get("brain", "synonyms")
	thoughts = config.get("brain", "thoughts")
	messages = config.get("brain", "messages")
	state = config.get("brain", "state")
	brain = Learning(words, synonyms, thoughts, messages, state)

	xmpp = Client(jid, password, room, nick)
	xmpp.register_plugin('xep_0030') # Service Discovery
	xmpp.register_plugin('xep_0045') # Multi-User Chat
	xmpp.register_plugin('xep_0199') # XMPP Ping
	def do_brain(nick, msg, **keywords):
		brain(msg, nick, xmpp.muc_send)
	xmpp.add_message_listener(do_brain)
	if xmpp.connect():
		xmpp.process(block=True)
	else:
		print("Unable to connect")
Example #8
import numpy as np
from environment_explauto.environment import TestEnvironment
from learning import Learning

if __name__ == "__main__":

    print "Create environment"
    environment = TestEnvironment()

    print "Create agent"
    learning = Learning(
        dict(m_mins=environment.conf.m_mins,
             m_maxs=environment.conf.m_maxs,
             s_mins=environment.conf.s_mins,
             s_maxs=environment.conf.s_maxs))
    learning.start()

    print()
    print("Do 100 autonomous steps:")
    for i in range(100):
        context = environment.get_current_context()
        m = learning.produce(context)
        s = environment.update(m)
        learning.perceive(s)

    print "Do 1 arm demonstration"
    m_demo_traj = np.zeros((25, 4)) + 0.001
    m_demo = environment.torsodemo2m(m_demo_traj)
    print "m_demo", m_demo
    s = environment.update(m_demo)
    learning.perceive(s, m_demo=m_demo)
Example #9
def main():
    parser = argparse.ArgumentParser(description='Semantic Segmentation')
    parser.add_argument('--train_cfg',
                        type=str,
                        default='./configs/train.yaml',
                        help='train config path')
    args = parser.parse_args()
    config_path = Path(args.train_cfg)
    config = load_yaml(config_path)
    init_seed(config['SEED'])

    image_datasets = {
        x: vinDataset(root_dir=config['ROOT_DIR'],
                      file_name=config['FILE_NAME'],
                      num_triplet=config['NUM_TRIPLET'],
                      phase=x)
        for x in ['train', 'valid']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=config['BATCH_SIZE'],
                                       shuffle=True,
                                       num_workers=4,
                                       pin_memory=True)
        for x in ['train', 'valid']
    }

    model = getattribute(config=config, name_package='MODEL')
    criterion = getattribute(config=config, name_package='CRITERION')
    metric_ftns = [accuracy_score]
    optimizer = getattribute(config=config,
                             name_package='OPTIMIZER',
                             params=model.parameters())
    scheduler = getattribute(config=config,
                             name_package='SCHEDULER',
                             optimizer=optimizer)
    device = config['DEVICE']
    num_epoch = config['NUM_EPOCH']
    gradient_clipping = config['GRADIENT_CLIPPING']
    gradient_accumulation_steps = config['GRADIENT_ACCUMULATION_STEPS']
    early_stopping = config['EARLY_STOPPING']
    validation_frequency = config['VALIDATION_FREQUENCY']
    saved_period = config['SAVED_PERIOD']
    checkpoint_dir = Path(config['CHECKPOINT_DIR'], type(model).__name__)
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    resume_path = config['RESUME_PATH']
    learning = Learning(model=model,
                        criterion=criterion,
                        metric_ftns=metric_ftns,
                        optimizer=optimizer,
                        device=device,
                        num_epoch=num_epoch,
                        scheduler=scheduler,
                        grad_clipping=gradient_clipping,
                        grad_accumulation_steps=gradient_accumulation_steps,
                        early_stopping=early_stopping,
                        validation_frequency=validation_frequency,
                        save_period=saved_period,
                        checkpoint_dir=checkpoint_dir,
                        resume_path=resume_path)

    learning.train(tqdm(dataloaders['train']), tqdm(dataloaders['valid']))
Example #10
import math
import time
import torch
import torch.nn as nn
import torchvision.transforms as t
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
import matplotlib.pyplot as plt
from mario_q import MarioManager
from helpers import Transition
from helpers import ReplayMemory
from helpers import DQN
from learning import Learning

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
em = MarioManager(device)
memory = ReplayMemory(1000000)
policy = DQN(em.screen_height(), em.screen_width()).to(device)
target = DQN(em.screen_height(), em.screen_width()).to(device)
optimizer = optim.Adam(params=policy.parameters(), lr=0.001)
target.load_state_dict(policy.state_dict())
target.eval()

learning_agent = Learning(policy, target, em, memory, optimizer)
learning_agent.learn()
learning_agent.plot_on_figure()
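The policy/target pair above is the standard DQN recipe: the target network is a frozen copy of the policy network, refreshed periodically so the bootstrap targets stay stable. Learning presumably performs that refresh internally; a minimal sketch of the usual pattern, where TARGET_UPDATE is a hypothetical interval not taken from this code:

TARGET_UPDATE = 10  # hypothetical: re-sync the target network every 10 episodes

for episode in range(100):
    # (experience collection and policy optimization would happen here)
    if episode % TARGET_UPDATE == 0:
        target.load_state_dict(policy.state_dict())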
Example #11
def train_fold(train_config, experiment_folder, pipeline_name, log_dir,
               fold_id, train_dataloader, val_dataloader, binarizer_fn,
               eval_fn):

    fold_logger = helpers.init_logger(log_dir, f'train_log_{fold_id}.log')

    best_checkpoint_folder = Path(experiment_folder,
                                  train_config['CHECKPOINTS']['BEST_FOLDER'])
    best_checkpoint_folder.mkdir(parents=True, exist_ok=True)

    checkpoints_history_folder = Path(
        experiment_folder, train_config['CHECKPOINTS']['FULL_FOLDER'],
        f'fold_{fold_id}')
    checkpoints_history_folder.mkdir(parents=True, exist_ok=True)
    checkpoints_topk = train_config['CHECKPOINTS']['TOPK']

    calculation_name = f'{pipeline_name}_fold_{fold_id}'

    device = train_config['DEVICE']

    module = importlib.import_module(train_config['MODEL']['PY'])
    model_class = getattr(module, train_config['MODEL']['CLASS'])
    model = model_class(**train_config['MODEL']['ARGS'])

    pretrained_model_config = train_config['MODEL'].get('PRETRAINED', False)
    if pretrained_model_config:
        loaded_pipeline_name = pretrained_model_config['PIPELINE_NAME']
        pretrained_model_path = Path(
            pretrained_model_config['PIPELINE_PATH'],
            pretrained_model_config['CHECKPOINTS_FOLDER'],
            f'{loaded_pipeline_name}_fold_{fold_id}.pth')

        if pretrained_model_path.is_file():
            model.load_state_dict(torch.load(pretrained_model_path))
            fold_logger.info(f'Load model from {pretrained_model_path}')

    if len(train_config['DEVICE_LIST']) > 1:
        model = torch.nn.DataParallel(model)

    module = importlib.import_module(train_config['CRITERION']['PY'])
    loss_class = getattr(module, train_config['CRITERION']['CLASS'])
    loss_fn = loss_class(**train_config['CRITERION']['ARGS'])

    optimizer_class = getattr(torch.optim, train_config['OPTIMIZER']['CLASS'])
    optimizer = optimizer_class(model.parameters(),
                                **train_config['OPTIMIZER']['ARGS'])
    scheduler_class = getattr(torch.optim.lr_scheduler,
                              train_config['SCHEDULER']['CLASS'])
    scheduler = scheduler_class(optimizer, **train_config['SCHEDULER']['ARGS'])

    n_epochs = train_config['EPOCHS']
    grad_clip = train_config['GRADIENT_CLIPPING']
    grad_accum = train_config['GRADIENT_ACCUMULATION_STEPS']
    early_stopping = train_config['EARLY_STOPPING']
    validation_frequency = train_config.get('VALIDATION_FREQUENCY', 1)

    freeze_model = train_config['MODEL']['FREEZE']

    Learning(optimizer, binarizer_fn, loss_fn, eval_fn, device, n_epochs,
             scheduler, freeze_model, grad_clip, grad_accum, early_stopping,
             validation_frequency, calculation_name, best_checkpoint_folder,
             checkpoints_history_folder, checkpoints_topk,
             fold_logger).run_train(model, train_dataloader, val_dataloader)
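Note the reflection idiom above: optimizer and scheduler classes are resolved by their config-file names via getattr on torch.optim and torch.optim.lr_scheduler. A self-contained illustration of the same idiom (the names and values here are stand-ins, not taken from the config):

import torch

opt_name, opt_args = 'Adam', {'lr': 1e-3}  # stand-ins for OPTIMIZER CLASS / ARGS
params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = getattr(torch.optim, opt_name)(params, **opt_args)
print(type(optimizer).__name__)  # -> Adam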
Example #12
    spec = prep.pre_transportation(spec)
    spec = prep.pre_land(spec)
    spec = prep.pre_area(spec)
    spec = prep.pre_structure(spec)
    spec = prep.pre_age(spec)
    spec = prep.pre_floor(spec)
    spec = prep.pre_direction(spec)
    spec = prep.pre_mcost(spec)
    spec = prep.pre_rcost(spec)
    spec = prep.encode_cat_to_label(spec)
    spec = prep.pre_outlier(spec)
    spec = prep.del_bid(spec)
    prep.save_spec(spec)

    #learning
    learning = Learning(data_version)
    gs = learning.xgb_learning(spec)
    learning.show_results(gs)
    learning.save_model(gs)
#     spec_all,spec_bad=learning.check_prediction(spec)
#     learning.save_prediction(spec_all)

elif mode == 'inference':
    data_version = datetime.now().strftime("%Y%m%d")
    model_version = input('which model do you use? ex.original...')
    # get spec
    get_spec = GetSpec(data_version)
    page_num = get_spec.get_page_num()
    urls = get_spec.get_urls(page_num)
    pages = get_spec.get_pages(urls)
    get_spec.save_pages(pages)
Example #13
def main():
    parser = argparse.ArgumentParser(description='Pytorch parser')
    parser.add_argument('--train_cfg',
                        type=str,
                        default='./configs/efficientdet-d0.yaml',
                        help='train config path')
    parser.add_argument('-d',
                        '--device',
                        default=None,
                        type=str,
                        help='indices of GPUs to enable (default: all)')
    parser.add_argument('-r',
                        '--resume',
                        default=None,
                        type=str,
                        help='path to latest checkpoint (default: None)')

    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['-lr', '--learning_rate'],
                   type=float,
                   target='OPTIMIZER,ARGS,lr'),
        CustomArgs(['-bs', '--batch_size'],
                   type=int,
                   target='TRAIN_DATALOADER,ARGS,batch_size;'
                          'VALID_DATALOADER,ARGS,batch_size')
    ]
    config = config_parser(parser, options)
    init_seed(config['SEED'])
    train_dataset = VOCDetection(root=VOC_ROOT,
                                 transform=SSDAugmentation(
                                     voc['min_dim'], MEANS))

    train_dataloader = getattribute(config=config,
                                    name_package='TRAIN_DATALOADER',
                                    dataset=train_dataset,
                                    collate_fn=detection_collate)
    # valid_dataloader = getattribute(config = config, name_package = 'VALID_DATALOADER', dataset = valid_dataset)
    model = getattribute(config=config, name_package='MODEL')
    criterion = getattribute(config=config, name_package='CRITERION')
    optimizer = getattribute(config=config,
                             name_package='OPTIMIZER',
                             params=model.parameters())
    scheduler = getattribute(config=config,
                             name_package='SCHEDULER',
                             optimizer=optimizer)
    device = config['DEVICE']
    metric_ftns = []
    num_epoch = config['NUM_EPOCH']
    gradient_clipping = config['GRADIENT_CLIPPING']
    gradient_accumulation_steps = config['GRADIENT_ACCUMULATION_STEPS']
    early_stopping = config['EARLY_STOPPING']
    validation_frequency = config['VALIDATION_FREQUENCY']
    tensorboard = config['TENSORBOARD']
    checkpoint_dir = Path(config['CHECKPOINT_DIR'], type(model).__name__)
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    resume_path = config['RESUME_PATH']
    learning = Learning(model=model,
                        criterion=criterion,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        metric_ftns=metric_ftns,
                        device=device,
                        num_epoch=num_epoch,
                        grad_clipping=gradient_clipping,
                        grad_accumulation_steps=gradient_accumulation_steps,
                        early_stopping=early_stopping,
                        validation_frequency=validation_frequency,
                        tensorboard=tensorboard,
                        checkpoint_dir=checkpoint_dir,
                        resume_path=resume_path)

    learning.train(tqdm(train_dataloader))
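Each CustomArgs target above is a comma-separated path into the nested config dict, and a semicolon separates multiple paths, which is how the batch_size flag updates both dataloader sections at once. config_parser is not shown on this page; a minimal sketch of how such a target string could be applied (a hypothetical helper, not the actual implementation):

def apply_override(config, target, value):
    # Walk each 'A,B,C' path to config['A']['B'] and set key 'C' to value.
    for path in target.split(';'):
        *keys, last = path.split(',')
        node = config
        for key in keys:
            node = node[key]
        node[last] = value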