Example #1
def main():
    """

    :return:
    """
    print '=== SWU: Send Email ==='

    args = get_args()
    error = validate_args(args)
    if error:
        print(error)
        exit(1)

    swu = SendWithUsService(args.key)

    if args.template_data:
        with open(args.template_data) as f:  # open the path held in the arg, not the literal string
            template_data = loads(f.read())  # json.loads takes a string, not a file object
    else:
        template_data = {}

    if args.email:
        send_email(swu, args.template_id, args.email, template_data)
    elif args.segment_id:
        segment_send_email(swu, args.segment_id, template_data)

    print('=== End SWU: Send Email ===')

    exit(0)
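
The `get_args` and `validate_args` helpers these SWU scripts rely on are not shown. Below is a minimal argparse-based sketch of what they plausibly look like, inferred from the attribute names used above (`key`, `template_id`, `email`, `segment_id`, `template_data`); the real project's flags and validation rules may differ.

import argparse

def get_args():
    # Hypothetical reconstruction of the CLI the scripts above expect.
    parser = argparse.ArgumentParser(description='SWU: Send Email')
    parser.add_argument('--key', required=True, help='SendWithUs API key')
    parser.add_argument('--template-id', help='email template to render')
    parser.add_argument('--email', help='single recipient address')
    parser.add_argument('--segment-id', help='send to a whole segment instead')
    parser.add_argument('--template-data',
                        help='path to a JSON file of template variables')
    return parser.parse_args()  # dashes become underscores: args.template_id

def validate_args(args):
    # Return an error message, or None if the arguments are usable.
    if not args.email and not args.segment_id:
        return 'either --email or --segment-id is required'
    if args.email and not args.template_id:
        return '--template-id is required when sending to a single address'
    return None
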
Example #2
def main():
    """

    :return:
    """
    print "=== SWU: Group Creation ==="

    args = get_args()
    group_create(args.key, args.group_name, args.group_desc)

    print "=== End SWU: Group Creation ==="

    exit(0)
Example #3
def __init__(self, agent0, agent1, simulator):
    """
    :param agent0: first player
    :param agent1: second player
    :param simulator: game simulator (can query)
    """
    self.agent0 = agent0
    self.agent1 = agent1
    self.simulator = simulator
    self.args = argument.get_args()
    self.logger = logger.get_logger()
    self.num_run = 0
    self.actionSequence = []
Example #4
def main():
    # Initializing the logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    file_handler = logging.FileHandler('xkcd.log', 'a')  # logging needs text mode; 'ab' fails on write

    formatter = logging.Formatter('%(levelname)s:%(message)s')
    file_handler.setFormatter(formatter)

    logger.addHandler(file_handler)

    # Getting the command line arguments
    # args is a dictionary
    args = argument.get_args()
    keys = args.keys()

    try:
        if 'comic_number' in keys:

            download_comic(
                args['path'],
                start=int(args['comic_number']),
                end=int(args['comic_number']) + 1)

        elif 'comic_range' in keys and args['comic_range'][1] != '#':

            download_comic(
                args['path'],
                start=int(args['comic_range'][0]),
                end=int(args['comic_range'][1]) + 1)

        elif 'comic_range' in keys and args['comic_range'][1] == '#':

            download_comic(args['path'], start=int(args['comic_range'][0]))

        elif args['all']:

            download_comic(args['path'])

        elif args['latest']:

            download_comic(args['path'], start=0)

    except Exception as e:
        logger.exception('There was a problem: {}'.format(str(e)))
        print(e)
        print('Error logged')
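
Unlike the attribute-style `args` elsewhere on this page, this script treats `args` as a dictionary and tests key membership. Here is a sketch of a `get_args` that would support that calling style, assuming the key names used above (`path`, `comic_number`, `comic_range`, `all`, `latest`); the real `argument.get_args()` may use docopt or a different flag layout.

import argparse

def get_args():
    # Hypothetical: return a dict so callers can test `'comic_number' in args`.
    parser = argparse.ArgumentParser(description='xkcd comic downloader')
    parser.add_argument('path', help='directory to save comics into')
    parser.add_argument('--comic-number', help='download one comic by number')
    parser.add_argument('--comic-range', nargs=2, metavar=('START', 'END'),
                        help="END may be '#' to mean 'through the latest'")
    parser.add_argument('--all', action='store_true', help='download everything')
    parser.add_argument('--latest', action='store_true', help='newest comic only')
    parsed = vars(parser.parse_args())
    # Drop unset optionals so membership tests work; the boolean flags stay,
    # since the caller reads them directly as args['all'] / args['latest'].
    return {k: v for k, v in parsed.items() if v is not None}
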
Example #5
def main():
    """

    :return:
    """
    print '=== SWU: Group Deletion ==='

    args = get_args()
    error = validate_args(args)
    if error:
        print(error)
        exit(1)

    group_delete(args.key, args.group_name, args.group_id)

    print('=== End SWU: Group Deletion ===')

    exit(0)
Example #6
def main():
    """

    :return:
    """
    print "=== SWU: Remove customer ==="

    args = get_args()
    error = validate_args(args)
    if error:
        print(error)
        exit(1)

    swu = SendWithUsService(args.key)

    if args.drip_id:
        remove_from_drip(swu, args.drip_id, args.email)
    elif args.delete:
        delete_user(swu, args.email)

    print "=== End SWU: Remove customer ==="

    exit(0)
Example #7
# -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T

import argparse
from argument import get_args
args = get_args('DQN_PER')

from env import Env
env = Env(args)

from memory import ReplayMemory, PER_Memory
#memory = ReplayMemory(args)
memory = PER_Memory(args.memory_capacity)

#args.memory_capacity = 20000
#args.learn_start = 20000
#args.render= True
from agent import Agent
agent = Agent(args)

"""
define test function
"""
Example #8
def initSimulator(self, simulator):
    self.simulator = simulator
    self.logger = logger.get_logger()
    self.args = argument.get_args()
Example #9
        logger.info(f"search agent0 win {agent0Win} games out of {totalGames}")

def supervisedtrain(args, logger, dataProcessor):
    totalDataList = []
    for k in range(args.n_train_data):
        file = os.path.join(args.data_folder, 'searchPlay-' + str(k))
        dataList = dataProcessor.retrieveData(file)
        totalDataList.extend(dataList)
    currentModel = 0
    trainWorker = NetworkTraining()
    trainWorker.train(args.trainepochs, currentModel, totalDataList)


if __name__ == '__main__':
    argument.initialize_args()
    args = argument.get_args()
    logger.initialize_logger(args.log_folder, args.todo, 'info')
    logger = logger.get_logger()
    timer.init()
    argument.print_args(args, logger)

    dataProcessor.initSimulator(Board.Board(args.size, args.numberForWin))

    args.save_folder = 'test_visual'

    if args.todo == 'selfplaytrain':
        train(args, logger, dataProcessor)
    elif args.todo == 'visualize':
        visualize(args, logger, dataProcessor)
    elif args.todo == 'experiment':
        experiment(args, logger, dataProcessor)
def test():

    from torch.optim import SGD
    from argument import get_args
    from model import Efficientnet_Bifpn_ATSS
    args = get_args()
    model = Efficientnet_Bifpn_ATSS(args, load_backboe_weight=False)
    optimizer = SGD(
        model.backbone.backbone_net.parameters(),
        lr=0,
        momentum=0.9,
        weight_decay=0.0001,
        nesterov=True,
    )
    optimizer.add_param_group({
        'params': list(model.backbone.bifpn.parameters()),
        'lr': 0,
        'momentum': 0.9,
        'weight_decay': 0.0001,
        'nesterov': True
    })

    niters = 1200
    warmup_scheduler = GluonLRScheduler(optimizer,
                                        mode='linear',
                                        nepochs=1,
                                        iters_per_epoch=50,
                                        target_lr=[1e-4, 1e-3])
    scheduler = GluonLRScheduler(optimizer,
                                 mode='cosine',
                                 nepochs=24,
                                 iters_per_epoch=50)
    scheduler.set_baselrs(warmup_scheduler.target_lr)
    initial_scheduler = False
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer,step_size=10)
    lrs_1 = []
    lrs_2 = []
    lrs_1.append(optimizer.param_groups[0]['lr'])
    lrs_2.append(optimizer.param_groups[1]['lr'])
    for i in range(niters):
        optimizer.step()
        if i < warmup_scheduler.niters:
            warmup_scheduler.step()
        else:
            scheduler.step()
        lrs_1.append(optimizer.param_groups[0]['lr'])
        lrs_2.append(optimizer.param_groups[1]['lr'])

    from matplotlib import pyplot as plt
    steps = [i for i in range(niters + 1)]
    fig = plt.figure()
    fig.tight_layout()
    ax_1 = fig.add_axes([0.1, 0.2, 0.35, 0.35])
    ax_2 = fig.add_axes([0.6, 0.2, 0.35, 0.35])
    line_1, = ax_1.plot(steps, lrs_1)
    line_2, = ax_2.plot(steps, lrs_2)
    line_1.set_label('learning rate group_1')
    line_2.set_label('learning rate group_2')
    ax_1.legend()
    ax_2.legend()
    plt.show()
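
The two `GluonLRScheduler` instances above appear to implement a linear warmup (one epoch of 50 iterations up to the target learning rates) followed by a cosine decay over the remaining epochs. Here is a self-contained sketch of that combined schedule as a plain function, under the assumption that the cosine phase anneals toward zero:

import math

def warmup_cosine_lr(step, base_lr, warmup_iters=50, total_iters=1200):
    # Linear warmup from ~0 to base_lr, then cosine decay toward 0.
    if step < warmup_iters:
        return base_lr * (step + 1) / warmup_iters
    t = (step - warmup_iters) / max(1, total_iters - warmup_iters)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * min(t, 1.0)))

# Reproduces the shape plotted in test() for the first parameter group:
lrs = [warmup_cosine_lr(i, base_lr=1e-4) for i in range(1200)]
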
Example #11
def __init__(self):
    self.arg = argument.get_args()
    self.logger = get_logger()
Example #12
def Training():
    args = argument.get_args()
    logger = get_logger()
    currentModel = -1 if args.overwrite else dataProcessor.getLatestNetworkID()
    trainWorker = NetworkTraining()
    replayBuffer = []
    Loss = []
    WinRate = []

    rollout0 = None
    balance0 = 0
    if args.rolloutMode == 'network':
        rollout0 = None
        balance0 = 0
    elif args.rolloutMode == 'minmax':
        rollout0 = minMaxRolloutFn(1)
        balance0 = 1
    elif args.rolloutMode == 'random':
        rollout0 = randomRolloutFn(20)
        balance0 = 1
    elif args.rolloutMode == 'mix_minmax':
        rollout0 = minMaxRolloutFn(1)
        balance0 = args.balance
    elif args.rolloutMode == 'mix_random':
        rollout0 = randomRolloutFn(30)
        balance0 = args.balance
    else:
        rollout0 = None
        balance0 = 1

    for rd in range(1, args.trainround + 1):
        logger.info("round:%d" % rd)
        if currentModel != -1:
            model = dataProcessor.loadNetwork(args, currentModel)
        else:
            model = PolicyValueFn(args).to(device=args.device)
        eta = math.log(args.trainround / rd) + 1
        file = os.path.join(args.data_folder, f"selfplay-{currentModel+1}.txt")
        #rollout =randomRolloutFn(cnt=7)
        agent1 = Agent.SelfplayAgent(args.numOfIterations,
                                     model,
                                     file,
                                     eta,
                                     rollout=rollout0,
                                     balance=balance0)

        b = Board.Board(args.size, args.numberForWin)
        g = Game.Game(agent0=agent1, agent1=agent1, simulator=b)

        for i in range(1, args.epochs + 1):
            logger.info("epoch %d" % i)
            TimeID = timer.startTime("play time")
            g.run()
            timer.endTime(TimeID)
            timer.showTime(TimeID)
            if i % args.n_save_step == 0:
                agent1.saveData()
            if args.openReplayBuffer and len(replayBuffer) > args.buffersize:
                buffer = []
                for _ in range(args.buffersize):  # '_' avoids shadowing the epoch counter i
                    buffer.append(random.choice(replayBuffer))
                trainWorker.train(args.miniTrainingEpochs,
                                  currentModel,
                                  buffer,
                                  update=False)
            #if args.openReplayBuffer and len(replayBuffer):
            #    trainWorker.train(args.miniTrainingEpochs, currentModel, replayBuffer, update=False)
        agent1.saveData()
        dataList = dataProcessor.retrieveData(file)
        replayBuffer = replayBuffer + dataList
        if len(replayBuffer) > args.maxBufferSize:
            replayBuffer = replayBuffer[-args.maxBufferSize:]
        currentModel += 1
        TimeID = timer.startTime("network training")
        Loss.append(trainWorker.train(args.trainepochs, currentModel,
                                      dataList))
        timer.endTime(TimeID)
        timer.showTime(TimeID)

        #if args.openReplayBuffer:
        #    TimeID = timer.startTime("update replay buffer")
        #    replayBuffer = trainWorker.getReplayData(currentModel, dataList)
        #    timer.endTime(TimeID)
        #    timer.showTime(TimeID)
        agentTest = Agent.IntelligentAgent(args.numOfIterations,
                                           dataProcessor.loadNetwork(args),
                                           rolloutFn=rollout0,
                                           balance=balance0)

        exp = Experiment()
        WinRate.append(exp.evaluationWithBaseLine(agentTest))
        logger.info("WinRate: %.3f" % WinRate[-1])
    return Loss, WinRate
Example #13
def __init__(self):
    self.args = argument.get_args()
    self.logger = logger.get_logger()
    self.depth = 0
    self.numOfEvaluations = self.args.numOfEvaluations
    self.evaluationData = []
Example #14
    ./main.py alexnet ~/caffemodels/alexnet --src ./src --dst ./dst_alexnet
"""
from __future__ import print_function
import os
import sys

import chainer
import numpy

from argument import get_args
from model import load_model
from files import load_image, save_features, mkdir, grep_images


if __name__ == '__main__':
    args, xp = get_args()
    forward, in_size, mean_image = load_model(args)
    mkdir(args.dst)

    msg = True
    ps = []
    path_list = grep_images(args.src)
    total = len(path_list)
    x_batch = numpy.ndarray((args.batchsize, 3, in_size, in_size), dtype=numpy.float32)

    for i, path in enumerate(path_list):
        image = load_image(path, in_size, mean_image)
        x_batch[i % args.batchsize] = image
        ps.append(path)

        if i == 0 and i != total - 1:
Example #15
# -*- coding: utf-8 -*-

import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T
import sys

import argparse
from argument import get_args
args = get_args('DQN')

#args.game = 'MountainCar-v0'
#args.max_step = 200
#args.action_space =3
#args.state_space = 2
#args.memory_capacity = 1000
args.learn_start = 1000
#args.render= True
from env import Env
env = Env(args)

from memory import ReplayMemory
memory = ReplayMemory(args)

#args.memory_capacity = 1000
Example #16
"""

import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T

import argparse
from argument import get_args

args = get_args('DQN_LSTM')
args.game = 'CartPole-v1'
args.max_step = 500
args.action_space = 2
args.state_space = 4

from env import Env

env = Env(args)

args.memory_capacity = 1000000
args.learn_start = 1000000
#args.render= True
args.lr = 0.001
from memory import ReplayMemory, episodic_experience_buffer
#memory = LSTM_ReplayMemory(args)
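
The `episodic_experience_buffer` imported for this LSTM variant presumably stores whole episodes and samples fixed-length subsequences, so the recurrent state can be unrolled over consecutive steps. Below is a minimal sketch of that idea; the class name, method names, and behavior here are assumptions, not the project's API.

import random

class EpisodicBufferSketch:
    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored episodes
        self.episodes = []

    def push_episode(self, transitions):
        # transitions: list of (state, action, reward, next_state, done)
        if len(self.episodes) >= self.capacity:
            self.episodes.pop(0)
        self.episodes.append(transitions)

    def sample(self, batch_size, seq_len):
        # Return contiguous subsequences suitable for LSTM unrolling.
        eligible = [ep for ep in self.episodes if len(ep) >= seq_len]
        batch = []
        for ep in random.choices(eligible, k=batch_size):
            start = random.randint(0, len(ep) - seq_len)
            batch.append(ep[start:start + seq_len])
        return batch
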
Example #17
File: DQN_double.py  Project: wotmd5731/dqn
# -*- coding: utf-8 -*-

import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T

import argparse
from argument import get_args
args = get_args('double_DQN')

from env import Env
env = Env(args)

from memory import ReplayMemory
memory = ReplayMemory(args)

from agent import Agent
agent = Agent(args)
"""
define test function
"""
from plot import _plot_line
current_time = time.time()
Ts, Trewards, Qs = [], [], []
Example #18
    elif args.model_type == 'electra':
        model = ElectraForSequenceClassification.from_pretrained(model_dir)
    elif args.model_type == 'roberta':
        model = XLMRobertaForSequenceClassification.from_pretrained(model_dir)
    model.to(device)

    # load test dataset
    # root = "/opt/ml"
    # root = "/content/drive/MyDrive/Boostcamp/Stage2_KLUE"
    root = args.root
    test_dataset, test_label = load_test_dataset(root, tokenizer)
    test_dataset = RE_Dataset(test_dataset, test_label)

    # predict answer
    pred_answer = inference(model, test_dataset, device)
    # logits, predictions = inference(model, test_dataset, device)

    # make csv file with predicted answer
    # Please keep the directory layout and column names shown below.
    output = pd.DataFrame(pred_answer, columns=['pred'])
    # output = pd.DataFrame(predictions, columns=['pred'])
    output.to_csv(f'./results/{args.id}/submission{args.id}.csv', index=False)
    # np.save(f'./results/{args.id}/logits{args.id}.npy', logits)
    print('File saved')


if __name__ == '__main__':
    args = get_args()
    main(args)
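
The `inference` helper called above is not shown. Here is a plausible sketch, assuming `RE_Dataset` yields dicts of tokenized tensors, that batches the test set and takes the argmax over the sequence-classification logits; the batch size and key names are assumptions.

import torch
from torch.utils.data import DataLoader

def inference(model, dataset, device, batch_size=32):
    # Hypothetical reconstruction: batched argmax over classifier logits.
    model.eval()
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    preds = []
    with torch.no_grad():
        for batch in loader:
            outputs = model(
                input_ids=batch['input_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device),
            )
            preds.extend(outputs.logits.argmax(dim=-1).cpu().tolist())
    return preds
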
Example #19
# -*- coding: utf-8 -*-

import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T

import argparse
from argument import get_args
args = get_args('DQN_CNN')

from env import Env, Env_CNN
env = Env_CNN(args)

from memory import ReplayMemory
memory = ReplayMemory(args)

args.memory_capacity = 200000
args.learn_start = 2000
#args.render= True
from agent import Agent
agent = Agent(args, dqn_cnn=True)
"""
define test function
"""
from plot import _plot_line