def __init__(self, **kwargs):
        """
        Constructor for the EyeTrackerClient class.
        :param kwargs: keyword arguments forwarded to the Element constructor
        :return: None
        """
        if getattr(self, "defaults", None) is None:
            self.defaults = {}
        # set logGaze before constructing Element so that it ends up
        # as self.config.logGaze == True
        self.defaults["logGaze"] = True

        # call Element constructor
        super(EyeTrackerClient, self).__init__(**kwargs)

        # this is not a visible element!!!
        self.hideElement()
        """ constructor for the EyeTracker class """
        self.status = TRACKER_STATUS.DISCONNECTED
        # gazeData is a list of triplets (timeStamp, x, y)
        if getattr(self.config, "smoothWindow", None) is None:
            self.config.smoothWindow = 5.0
        else:
            self.config.smoothWindow = float(self.config.smoothWindow)
        self.gazeData = [(0, 0, 0)] * int(self.config.smoothWindow)
        self.smoothSampleXY = [0.0, 0.0]

        if self.config.logGaze:
            # one gaze log per participant
            self.gazeLogger = Logger(self.baseTime,
                                     "run/gazeData_" +
                                     self.config.world.participantId + ".log",
                                     mode='w')
        else:
            self.gazeLogger = Logger(self.baseTime, "noLog")
        #self.gazeLogger.startLog()

        # create a mutex for accessing the gazeData list
        self.gazeMutex = Mutex('gazeMutex')

        gazeTex = loader.loadTexture(
            'Elements/Game/models/textures/outter_circle.png')
        gazeTex.setMinfilter(Texture.FTLinearMipmapLinear)
        gazeTex.setAnisotropicDegree(2)

        gazeNode = loader.loadModel("Elements/Game/models/plane")
        gazeNode.reparentTo(self.hudNP)
        gazeNode.setScale(0.1, 1.0, 0.1)
        gazeNode.setTransparency(1)
        gazeNode.setAlphaScale(0.1)
        gazeNode.setTexture(gazeTex)
        gazeNode.setPos(-1.7, 0, 0)
        self.gazeNode = gazeNode
        #w,h = map(float,(cam.screenWidth,cam.screenHeight))
        self.normX = base.win.getXSize() / float(
            base.win.getYSize())  # self.config.world.getCamera().ratio
        self.hudNP.setBin('fixed', 10)
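The constructor only seeds gazeData and smoothSampleXY; the averaging itself lives elsewhere in the class. A minimal sketch of the moving-average update that the smoothWindow implies (the method name _smoothGaze and the acquire/release pattern are assumptions, not taken from the original source):

    def _smoothGaze(self, timeStamp, x, y):
        # hypothetical helper: push the newest sample and average the window
        self.gazeMutex.acquire()
        try:
            self.gazeData.pop(0)
            self.gazeData.append((timeStamp, x, y))
            n = float(len(self.gazeData))
            self.smoothSampleXY[0] = sum(s[1] for s in self.gazeData) / n
            self.smoothSampleXY[1] = sum(s[2] for s in self.gazeData) / n
        finally:
            self.gazeMutex.release()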
Example #2
class Mailer():
    server = None
    message = ""
    emails = ["myMail"]
    logger = Logger()

    def __init__(self):
        pass

    def sendMail(self, message, htmlFlag, nextUpdateParam):
        if htmlFlag:
            self.message = MIMEMultipart(
                "alternative", None,
                [MIMEText(message),
                 MIMEText(message, 'html')])
        else:
            self.message = MIMEText(message)
        self.message['Subject'] = 'Prime Interest For this Month, Next Update At ' + nextUpdateParam
        self.message['From'] = 'PrimeService - Auto Generated'
        self.message['To'] = ','.join(self.emails)
        self.server = smtplib.SMTP('smtp.gmail.com', 587)
        # self.server.ehlo()
        self.server.starttls()
        self.server.ehlo()
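        # NOTE: the account credentials below are hard-coded; in practice they
        # would be read from configuration or the environment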
        self.server.login('myMail', 'ikiwrwtovszgxyik')
        print('after login and before send - message:')
        print(self.message.as_string())
        self.server.sendmail("*****@*****.**", self.emails,
                             self.message.as_string())
        self.server.close()

    def log(self, message):
        self.logger.log('Mailer', message)
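A minimal usage sketch for the Mailer above (the message text and next-update string are placeholders; sending only works with valid SMTP credentials):

mailer = Mailer()
mailer.sendMail("Prime interest update for this month", htmlFlag=False,
                nextUpdateParam="2020-02-01")
mailer.log("monthly mail sent")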
Example #3
 def setUp(self):
     self.families = []
     self.individuals = []
     self.famMap = {}
     self.indMap = {}
     self.logger = Logger()
     self.seed_data()
     self.spousecheck = None
Example #4
 def analyze_once(self):
     self.refresh_event.wait()
     analysis_logger = self.logger
     if not self.settings["log_analysis"]:
         analysis_logger = Logger(FileWriter,
                                  PathProvider.pathes["ANALYSIS"].location)
     Analyzer(self.cron, self.settings["analyzer_iterations_amount"],
              analysis_logger).analyze_all()
Example #5
    def __init__(self, bot):
        self.bot = bot

        if not hasattr(bot, 'lavalink'):  # This ensures the client isn't overwritten during cog reloads.
            bot.lavalink = lavalink.Client(bot.user.id)
            bot.lavalink.add_node('avexlava.herokuapp.com', 80, 'youshallnotpass', 'us', 'default-node')  # Host, Port, Password, Region, Name
            bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')

        lavalink.add_event_hook(self.track_hook)

        self.logger = Logger(False)
        self.logger.log(name="Discord", output='Music Cog Loaded!')
Example #6
 def __init__(self, **kwargs):
     """
     See the Element class to find out what attributes are available
     from scratch
     """
     super(MouseLogger, self).__init__(**kwargs)
     self.mouseLog = Logger(
         self.baseTime,
         "run/mouseLog_%s.txt" % self.config.world.participantId)
     self.left = MouseButton.one()
     self.leftDown = False
     self.right = MouseButton.two()
     self.rightDown = False
     self.hideElement()
Example #7
    def __init__(self, **kwargs):
        super(EventLogger, self).__init__(**kwargs)
        self.listener = DirectObject()

        # registering some events by hand
        self.listener.accept('crossHair', self.logEvent)

        uniqueFileName = self.config.logfile + "_" + self.config.world.participantId + ".log"
        self.eventLog = Logger(self.baseTime, uniqueFileName, 'w')
        #self.eventLog.startLog()
        self.eventLog.logEvent('Event logger started\n')
        taskMgr.add(self.updateHooks, 'updateHooks')

        self.registeredEvents = messenger.getEvents()
        for e in self.registeredEvents:
            self.listener.accept(e, self.logEvent, [e])

        self.hideElement()
Example #8
 def setUp(self) -> None:
     self.testName = unittest.TestCase.id(self)
     self.logger = Logger(self.testName)
     if not self.logger:
         self.fail("No logger!")
     self.logger.info("Test started!")
     self.driver = webdriver.Chrome()
     self.UtilsSelenium = UtilsSelenium(self.driver, self.logger)
     self.driver.get("https://www.eobuwie.com.pl/")
     WebDriverWait(self.driver, 20).until(
         EC.element_to_be_clickable(mpl.main_page_login_button))
     self.driver.maximize_window()
     # assert self.driver.title == "Modne buty damskie, męskie, dziecięce oraz torebki  | eobuwie.pl", f"Loaded page is not the main page. Title is {self.driver.title} and should be Modne buty damskie, męskie, dziecięce oraz torebki  | eobuwie.pl"
     accept_cookies_button = self.driver.find_element(
         *mpl.main_page_cookies_accept_button)
     accept_cookies_close_button = self.driver.find_element(
         *mpl.main_page_cookies_close_button)
     if accept_cookies_button.is_displayed():
         accept_cookies_close_button.click()
Example #9
    def do_init(self):
        if self.did_init_osc_server == 0:
            self.did_init_osc_server = 1

            try:
                self.oscServer = RemixNet.OSCServer(self, '127.0.0.1', 9111,
                                                    None, 9008)
                self.callbackManager = self.oscServer.callbackManager
            except:
                self.error_log("touchAble: OSCServer init failed")
                self.did_init_osc_server = 0
                pass
            try:
                self.tACommon = touchAbleCommon.touchAbleCommon(
                    self._touchAble__c_instance, self.oscServer, self)
            except:
                self.did_init = 0
                self.error_log("touchAble: tACommon init failed")
                pass
            try:
                self.tAListener = touchAbleListener.touchAbleListener(
                    self._touchAble__c_instance, self.oscServer, self)
            except:
                self.did_init = 0
                self.error_log("touchAble: tAListener init failed")
                pass
            try:
                self.logger = self._LOG and Logger() or 0
                self.tA_log("Logging Enabled")
            except:
                self.error_log("touchAble: Logger init failed")
                pass
            try:
                with self.component_guard():
                    pass
                    #self._session_ring = self._create_session()  # SVH Change # Zerodebug Addition
            except:
                self.error_log("touchAble: _create_session failed")

            self.did_init = 1
        else:
            self.did_init = 0
            pass
Example #10
from twitchio.ext import commands
from Utils.Logger import Logger
from Utils.Secrets import IRC_TOKEN, NICK, CLIENT_ID, PREFIX, INITIAL_CHANNELS

bot = commands.Bot(irc_token=IRC_TOKEN,
                   nick=NICK,
                   client_id=CLIENT_ID,
                   prefix=PREFIX,
                   initial_channels=INITIAL_CHANNELS)
modules = ["Modules.Misc"]

logger = Logger(True)


@bot.event
async def event_ready():
    logger.success(name="Twitch", output="Connected to gateway!")
    for i in modules:
        try:
            bot.load_module(i)
        except Exception as e:
            logger.error(name="Twitch", output=f"Error loading {i}. {e}")


if __name__ == "__main__":
    bot.run()
Example #11
def after_feature(self, context):
    Logger().write("Ending Feature.....")
Example #12
@contact: [email protected]
@software: pengwei
@file: ParseExcel.py
@time: 2019/11/7 13:53
@desc:
'''

from openpyxl import load_workbook
from datetime import datetime
from Utils.ConfigRead import *
from Utils.Logger import Logger
from Utils.ParseYaml import ParseYaml
from openpyxl.drawing.image import Image
import xlrd, time, os

logger = Logger('logger').getlog()

class ParseExcel(object):

    '''
    Parse an Excel workbook
    '''
    def __init__(self, filename):
        self.filename = filename
        self.parseyaml = ParseYaml()
        # load the Excel workbook
        self.wb = load_workbook(self.filename)

    def getRowValue(self, sheetname, rowno):
        """
        Fetch the values of a single row from the given sheet
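The listing is cut off inside the getRowValue docstring. A plausible completion using the openpyxl workbook loaded in __init__ (a sketch only, not the original implementation):

    def getRowValue(self, sheetname, rowno):
        """
        Fetch the cell values of a single row from the given sheet
        :param sheetname: worksheet name
        :param rowno: 1-based row number
        :return: list of cell values
        """
        sheet = self.wb[sheetname]
        return [cell.value for cell in sheet[rowno]]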
Example #13
 def setUp(self) -> None:
     self.individuals = {}
     self.seed_data()
     self.record = Logger()
Example #14
def train_func(rank,
               args,
               shared_model,
               optimizer,
               env_conf,
               datasets=None,
               shared_dict=None):
    if args.deploy:
        return
    ptitle('Train {0}'.format(rank))
    print('Start training agent: ', rank)

    if rank == 0:
        logger = Logger(args.log_dir[:-1] + '_losses/')
        train_step = 0

    gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]
    env_conf["env_gpu"] = gpu_id
    torch.manual_seed(args.seed + rank)
    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed + rank)

    raw_list, gt_lbl_list = datasets
    env = EM_env(raw_list,
                 env_conf,
                 type="train",
                 gt_lbl_list=gt_lbl_list,
                 seed=args.seed + rank)

    if optimizer is None:
        if args.optimizer == 'RMSprop':
            optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)
        if args.optimizer == 'Adam':
            optimizer = optim.Adam(shared_model.parameters(),
                                   lr=args.lr,
                                   amsgrad=args.amsgrad)

    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    player.model = get_model(args,
                             args.model,
                             env.observation_space.shape,
                             args.features,
                             atrous_rates=args.atr_rate,
                             num_actions=2,
                             split=args.data_channel,
                             gpu_id=gpu_id,
                             multi=args.multi)
    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()

    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.state = player.state.cuda()
            player.model = player.model.cuda()
    player.model.train()

    if rank == 0:
        eps_reward = 0
        pinned_eps_reward = 0

    while True:
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                player.model.load_state_dict(shared_model.state_dict())
        else:
            player.model.load_state_dict(shared_model.state_dict())

        if player.done:
            player.eps_len = 0

            if rank == 0:
                if train_step % args.train_log_period == 0 and train_step > 0:
                    print("train: step", train_step, "\teps_reward",
                          eps_reward)
                if train_step > 0:
                    pinned_eps_reward = player.env.sum_reward.mean()
                    eps_reward = 0

            if args.lstm_feats:
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        player.cx, player.hx = player.model.lstm.init_hidden(
                            batch_size=1, use_cuda=True)
                else:
                    player.cx, player.hx = player.model.lstm.init_hidden(
                        batch_size=1, use_cuda=False)
        elif args.lstm_feats:
            player.cx = Variable(player.cx.data)
            player.hx = Variable(player.hx.data)

        for step in range(args.num_steps):

            if rank < args.lbl_agents:
                player.action_train(use_lbl=True)
            else:
                player.action_train()

            if rank == 0:
                eps_reward = player.env.sum_reward.mean()
            if player.done:
                break

        if player.done:
            state = player.env.reset(player.model, gpu_id)
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()

        if "3D" in args.data:
            R = torch.zeros(1, 1, env_conf["size"][0], env_conf["size"][1],
                            env_conf["size"][2])
        else:
            R = torch.zeros(1, 1, env_conf["size"][0], env_conf["size"][1])

        if args.lowres:
            R = torch.zeros(1, 1, env_conf["size"][0] // 2,
                            env_conf["size"][1] // 2)

        if not player.done:
            if args.lstm_feats:
                value, _, _ = player.model(
                    (Variable(player.state.unsqueeze(0)), (player.hx,
                                                           player.cx)))
            else:
                value, _ = player.model(Variable(player.state.unsqueeze(0)))
            R = value.data

        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                R = R.cuda()

        player.values.append(Variable(R))
        policy_loss = 0
        value_loss = 0

        if "3D" in args.data:
            gae = torch.zeros(1, 1, env_conf["size"][0], env_conf["size"][1],
                              env_conf["size"][2])
        else:
            gae = torch.zeros(1, 1, env_conf["size"][0], env_conf["size"][1])

        if args.rew_drop:
            keep_map = torch.tensor(player.env.keep_map)
        if args.lowres:
            gae = torch.zeros(1, 1, env_conf["size"][0] // 2,
                              env_conf["size"][1] // 2)

        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                gae = gae.cuda()
                if args.rew_drop:
                    keep_map = keep_map.cuda()
        R = Variable(R)

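        # Walk the rollout backwards: accumulate the discounted return R,
        # the value loss, and the generalized advantage estimate (gae) that
        # weights the policy-gradient term below.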
        for i in reversed(range(len(player.rewards))):
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    reward_i = torch.tensor(player.rewards[i]).cuda()
            else:
                reward_i = torch.tensor(player.rewards[i])

            R = args.gamma * R + reward_i
            if args.rew_drop:
                advantage = R - player.values[i]
                value_loss = value_loss + (0.5 * advantage * advantage *
                                           keep_map).mean()
                delta_t = player.values[
                    i + 1].data * args.gamma + reward_i - player.values[i].data
                gae = gae * args.gamma * args.tau + delta_t
            else:
                advantage = R - player.values[i]
                value_loss = value_loss + (0.5 * advantage * advantage).mean()
                delta_t = player.values[
                    i + 1].data * args.gamma + reward_i - player.values[i].data
                gae = gae * args.gamma * args.tau + delta_t
            if args.noisy:
                policy_loss = policy_loss - \
                    (player.log_probs[i] * Variable(gae)).mean()
            else:
                if args.rew_drop:
                    policy_loss = policy_loss - \
                        (player.log_probs[i] * Variable(gae) * keep_map).mean() - \
                        (args.entropy_alpha * player.entropies[i] * keep_map).mean()
                else:
                    policy_loss = policy_loss - \
                        (player.log_probs[i] * Variable(gae)).mean() - \
                        (args.entropy_alpha * player.entropies[i]).mean()

        player.model.zero_grad()
        sum_loss = (policy_loss + value_loss)

        curtime = time.time()
        # print ("backward curtime:", curtime)
        sum_loss.backward()
        # print ("backward done", time.time () - curtime)
        ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)

        curtime = time.time()
        # print ("optim curtime:", curtime)
        optimizer.step()
        # print ("optim done", time.time () - curtime)

        player.clear_actions()
        if args.wctrl == "s2m":
            player.env.config["spl_w"] = shared_dict["spl_w"]
            player.env.config["mer_w"] = shared_dict["mer_w"]

        if rank == 0:
            train_step += 1
            if train_step % args.log_period == 0 and train_step > 0:
                log_info = {
                    'train: value_loss': value_loss,
                    'train: policy_loss': policy_loss,
                    'train: eps reward': pinned_eps_reward,
                }

                if "EX" in args.model:
                    log_info["cell_prob_loss"] = cell_prob_loss

                for tag, value in log_info.items():
                    logger.scalar_summary(tag, value, train_step)
Example #15
# coding: UTF-8
import re
import threading
import threadpool
from Utils import DBUtils
from Utils import WeiXin
from Utils.HttpUtils import HttpUtils
from Utils.Logger import Logger
from Config import Configs
from Utils.ProxyProcess import process_proxy

logger = Logger(__file__).getLogger()

testStr = "<title></title>"


## Search for official (public) accounts by the given keyword
def handlePublicAccount(args):
    keyword = args[0]
    currentPage = args[1]
    accountType = args[2]

    items = parseAccount(keyword, currentPage)
    if items is None:
        logger.warn("Failed to fetch accounts for keyword: %s" % keyword)
        return
    for item in items:
        openId = item[0]
        ext = item[1]
        logo = item[2]
        name = item[3]
Example #16
import sys
from Module.Train import train
from Module.Eval import eval
import zipfile
import os
from FilesManager.FilesManager import FilesManager
from Utils.Logger import Logger
import urllib

if __name__ == "__main__":
    version_filename_flag = '.data_ver2'

    # create logger
    logger = Logger()

    application = None
    name = None
    gpu = None

    ## get input parameters
    # application
    if len(sys.argv) > 1:
        application = sys.argv[1]

    # module name
    if len(sys.argv) > 2:
        name = sys.argv[2]

    # gpu number
    if len(sys.argv) > 3:
        gpu = sys.argv[3]
Example #17
 def __init__(self, db: str):
     self.db = db
     self.logger = Logger(write=False)
     self.c = ""
Example #18
def test(args, shared_model, env_conf, datasets=None, hasLbl=True):
    if hasLbl:
        ptitle('Valid agent')
    else:
        ptitle("Test agent")

    gpu_id = args.gpu_ids[-1]
    env_conf["env_gpu"] = gpu_id
    log = {}
    logger = Logger(args.log_dir)

    setup_logger('{}_log'.format(args.env),
                 r'{0}{1}_log'.format(args.log_dir, args.env))
    log['{}_log'.format(args.env)] = logging.getLogger('{}_log'.format(
        args.env))
    d_args = vars(args)

    if hasLbl:
        for k in d_args.keys():
            log['{}_log'.format(args.env)].info('{0}: {1}'.format(
                k, d_args[k]))

    torch.manual_seed(args.seed)

    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed)

    if "EM_env" in args.env:
        raw_list, gt_lbl_list = datasets
        env = EM_env(raw_list, env_conf, type="train", gt_lbl_list=gt_lbl_list)
    else:
        env = Voronoi_env(env_conf)

    reward_sum = 0
    start_time = time.time()
    num_tests = 0
    reward_total_sum = 0

    player = Agent(None, env, args, None)

    player.gpu_id = gpu_id

    if args.model == "UNet":
        player.model = UNet(env.observation_space.shape[0], args.features, 2)
    elif args.model == "FusionNetLstm":
        player.model = FusionNetLstm(env.observation_space.shape,
                                     args.features, 2, args.hidden_feat)
    elif args.model == "FusionNet":
        player.model = FusionNet(env.observation_space.shape[0], args.features,
                                 2)
    elif args.model == "UNetLstm":
        player.model = UNetLstm(env.observation_space.shape, args.features, 2,
                                args.hidden_feat)

    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()
    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.model = player.model.cuda()
            player.state = player.state.cuda()
    player.model.eval()

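    # 'flag' forces a reload of the shared model weights before the first
    # episode and again after every finished episode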
    flag = True

    create_dir(args.save_model_dir)

    recent_episode_scores = []
    renderlist = []
    renderlist.append(player.env.render())
    max_score = 0
    while True:
        if flag:
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.model.load_state_dict(shared_model.state_dict())
            else:
                player.model.load_state_dict(shared_model.state_dict())
            player.model.eval()
            flag = False

        player.action_test()
        reward_sum += player.reward.mean()
        renderlist.append(player.env.render())

        if player.done:
            flag = True

            num_tests += 1
            reward_total_sum += reward_sum
            reward_mean = reward_total_sum / num_tests
            if hasLbl:
                log['{}_log'.format(args.env)].info(
                    "VALID: Time {0}, episode reward {1}, num tests {4}, episode length {2}, reward mean {3:.4f}"
                    .format(
                        time.strftime("%Hh %Mm %Ss",
                                      time.gmtime(time.time() - start_time)),
                        reward_sum, player.eps_len, reward_mean, num_tests))

            recent_episode_scores += [reward_sum]
            if len(recent_episode_scores) > 200:
                recent_episode_scores.pop(0)

            if args.save_max and np.mean(recent_episode_scores) >= max_score:
                max_score = np.mean(recent_episode_scores)
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save,
                            '{0}{1}.dat'.format(args.save_model_dir,
                                                'best_model_' + args.env))

            if num_tests % args.save_period == 0:
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save, '{0}{1}.dat'.format(
                                args.save_model_dir,
                                args.env + '_' + str(num_tests)))

            if num_tests % args.log_period == 0:
                if hasLbl:
                    print(
                        "----------------------VALID SET--------------------------"
                    )
                    print("Log test #:", num_tests)
                    print("rewards: ", player.reward.mean())
                    print("sum rewards: ", reward_sum)
                    print("------------------------------------------------")

                log_img = np.concatenate(renderlist, 0)
                if hasLbl:
                    log_info = {"valid_sample": log_img}
                else:
                    log_info = {"test_sample": log_img}

                for tag, img in log_info.items():
                    img = img[None]
                    logger.image_summary(tag, img, num_tests)

                if hasLbl:
                    log_info = {'mean_valid_reward': reward_mean}
                    for tag, value in log_info.items():
                        logger.scalar_summary(tag, value, num_tests)

            renderlist = []
            reward_sum = 0
            player.eps_len = 0

            player.clear_actions()
            state = player.env.reset()
            renderlist.append(player.env.render())
            time.sleep(15)
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()
Example #19
    def send(self):
        self.sendButton.configure(state='disabled')
        email = [self.emailText.get()]
        password = self.passwordTextBox.get()
        print("password:"******"",
                                 self.smtpValue.get(),
                                 progressbar=self.progressBar,
                                 interval=interval,
                                 startPoint=self.startPoint,
                                 ssl=self.ssl.get())
                self.product.updateLimit(
                    int(self.startPoint.get()) - int(subLimit))
                self.startText.configure(state='normal')
                self.sendButton.configure(state='normal')
                self.MailCheckButton.configure(state='normal')

            except smtplib.SMTPAuthenticationError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['authError'])
                raise e
            except smtplib.SMTPConnectError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['smtpError'])
                raise e
            except smtplib.SMTPSenderRefused as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['senderRefused'])
                raise e
            except smtplib.SMTPServerDisconnected as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['serverDisconnect'])
                raise e
            except smtplib.SMTPDataError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['senderRefused'])
                raise e
            except FileNotFoundError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['listError'])
            except KeyError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['keyError'])
                raise e
            except StartError as e:
                messagebox.showerror(errors[language]['error'],
                                     errors[language]['startError'])
                raise e

        except FileNotFoundError as e:
            print(e)
            messagebox.showerror("Hata", "Mesaj Dosyası Bulunamadı")
            self.startText.configure(state='normal')
            self.sendButton.configure(state='normal')

        except Exception as e:
            print(e)
            self.startText.configure(state='normal')
            self.sendButton.configure(state='normal')
            self.MailCheckButton.configure(state='normal')
            Logger(e)
Example #20
def train_func(rank, args, shared_model, optimizer, env_conf, datasets):
    if args.deploy:
        return
    ptitle('Train {0}'.format(rank))
    print('Start training agent: ', rank)

    if rank == 0:
        logger = Logger(args.log_dir[:-1] + '_losses/')
        train_step = 0

    gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]
    env_conf["env_gpu"] = gpu_id
    torch.manual_seed(args.seed + rank)
    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed + rank)

    env = Debug_env(datasets, env_conf, seed=args.seed + rank)

    if optimizer is None:
        if args.optimizer == 'RMSprop':
            optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)
        if args.optimizer == 'Adam':
            optimizer = optim.Adam(shared_model.parameters(),
                                   lr=args.lr,
                                   amsgrad=args.amsgrad)

    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    nChan = 3
    if args.is3D:
        nChan = 4
    if args.alpha_only:
        nChan = 1
    if not args.is3D:
        player.model = get_model(args,
                                 "ENet",
                                 input_shape=env_conf["obs_shape"],
                                 num_actions=args.num_actions * nChan)
    elif not args.obs3D:
        player.model = get_model(args,
                                 "ENet",
                                 input_shape=env_conf["obs_shape"],
                                 num_actions=args.num_actions * nChan)
    elif args.obs3D:
        player.model = get_model(args,
                                 "Net3D",
                                 input_shape=env_conf["obs_shape"],
                                 num_actions=args.num_actions * nChan)

    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()

    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.state = player.state.cuda()
            player.model = player.model.cuda()
    player.model.train()

    if rank == 0:
        eps_reward = 0
        pinned_eps_reward = 0

    while True:
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                player.model.load_state_dict(shared_model.state_dict())
        else:
            player.model.load_state_dict(shared_model.state_dict())

        if player.done:
            player.eps_len = 0

            if rank == 0:
                if train_step % args.train_log_period == 0 and train_step > 0:
                    print("train: step", train_step, "\teps_reward",
                          eps_reward)
                if train_step > 0:
                    pinned_eps_reward = player.env.sum_rewards.mean()
                    eps_reward = 0

        for step in range(args.num_steps):
            player.action_train()
            if rank == 0:
                eps_reward = player.env.sum_rewards.mean()
            if player.done:
                break

        if player.done:
            state = player.env.reset()
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()

        if not args.alpha_only:
            if not args.is3D:
                R = torch.zeros(1, 1, args.num_actions * 3)
            else:
                R = torch.zeros(1, 1, args.num_actions * 4)
        else:
            R = torch.zeros(1, 1, args.num_actions)

        if not player.done:
            value, _ = player.model(Variable(player.state.unsqueeze(0)))
            R = value.data

        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                R = R.cuda()

        player.values.append(Variable(R))
        policy_loss = 0
        value_loss = 0

        if not args.alpha_only:
            if not args.is3D:
                gae = torch.zeros(1, 1, args.num_actions * 3)
            else:
                gae = torch.zeros(1, 1, args.num_actions * 4)
        else:
            gae = torch.zeros(1, 1, args.num_actions)

        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                gae = gae.cuda()
        R = Variable(R)

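        # Backward pass over the rollout: discounted return R, value loss,
        # and GAE-weighted policy loss, mirroring the other train functions.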
        for i in reversed(range(len(player.rewards))):
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    reward_i = torch.tensor(player.rewards[i]).cuda()
            else:
                reward_i = torch.tensor(player.rewards[i])

            R = args.gamma * R + reward_i

            advantage = R - player.values[i]
            value_loss = value_loss + (0.5 * advantage * advantage).mean()
            delta_t = player.values[
                i + 1].data * args.gamma + reward_i - player.values[i].data
            gae = gae * args.gamma * args.tau + delta_t

            policy_loss = policy_loss - \
                (player.log_probs[i] * Variable(gae)).mean() - \
                (args.entropy_alpha * player.entropies[i]).mean()

        player.model.zero_grad()
        sum_loss = (policy_loss + value_loss)

        curtime = time.time()
        sum_loss.backward()
        ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)

        curtime = time.time()
        optimizer.step()

        player.clear_actions()

        if rank == 0:
            train_step += 1
            if train_step % (args.log_period * 10) == 0 and train_step > 0:
                log_info = {
                    'train: value_loss': value_loss,
                    'train: policy_loss': policy_loss,
                    'train: eps reward': pinned_eps_reward,
                }

                for tag, value in log_info.items():
                    logger.scalar_summary(tag, value, train_step)
Example #21
def test(args, shared_model, env_conf, datasets):
    ptitle('Test agent')
    gpu_id = args.gpu_ids[-1]
    log = {}

    logger = Logger(args.log_dir)

    setup_logger('{}_log'.format(args.env),
                 r'{0}{1}_log'.format(args.log_dir, args.env))
    log['{}_log'.format(args.env)] = logging.getLogger('{}_log'.format(
        args.env))
    d_args = vars(args)
    for k in d_args.keys():
        log['{}_log'.format(args.env)].info('{0}: {1}'.format(k, d_args[k]))

    torch.manual_seed(args.seed)

    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed)

    raw, gt_lbl = datasets
    env = EM_env(raw, gt_lbl, env_conf)
    reward_sum = 0
    start_time = time.time()
    num_tests = 0
    reward_total_sum = 0

    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    # player.model = A3Clstm (env.observation_space.shape, env_conf["num_action"], args.hidden_feat)
    player.model = SimpleCNN(env.observation_space.shape,
                             env_conf["num_action"])
    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()
    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.model = player.model.cuda()
            player.state = player.state.cuda()

    flag = True
    create_dir(args.save_model_dir)

    recent_episode_scores = []
    renderlist = []
    renderlist.append(player.env.render())
    max_score = 0
    while True:
        if flag:
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.model.load_state_dict(shared_model.state_dict())
            else:
                player.model.load_state_dict(shared_model.state_dict())
            player.model.eval()
            flag = False

        player.action_test()
        reward_sum += player.reward
        renderlist.append(player.env.render())

        if player.done:
            flag = True
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()

            num_tests += 1
            reward_total_sum += reward_sum
            reward_mean = reward_total_sum / num_tests
            log['{}_log'.format(args.env)].info(
                "Time {0}, episode reward {1}, num tests {4}, episode length {2}, reward mean {3:.4f}"
                .format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start_time)),
                    reward_sum, player.eps_len, reward_mean, num_tests))

            recent_episode_scores += [reward_sum]
            if len(recent_episode_scores) > 200:
                recent_episode_scores.pop(0)

            if args.save_max and np.mean(recent_episode_scores) >= max_score:
                max_score = np.mean(recent_episode_scores)
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save,
                            '{0}{1}.dat'.format(args.save_model_dir,
                                                'best_model_' + args.env))

            if num_tests % args.save_period == 0:
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save, '{0}{1}.dat'.format(
                                args.save_model_dir,
                                args.env + '_' + str(num_tests)))

            if num_tests % args.log_period == 0:
                print("------------------------------------------------")
                print("Log test #:", num_tests)
                print("Prob: ")
                for i in range(player.env.agent_out_shape[1]):
                    for j in range(player.env.agent_out_shape[2]):
                        print("{:.3f}\t".format(player.prob_cpu[0, i, j]),
                              end='')
                    print()
                print("Actions :", player.actions)
                print("Actions transformed: ")
                print(player.actions_explained)
                print("rewards: ", player.rewards)
                print("sum rewards: ", reward_sum)
                print("------------------------------------------------")
                log_img = np.concatenate(renderlist, 0)
                log_info = {"test: traning_sample": log_img}
                for tag, img in log_info.items():
                    img = img[None]
                    logger.image_summary(tag, img, num_tests)

                log_info = {'test: mean_reward': reward_mean}
                for tag, value in log_info.items():
                    logger.scalar_summary(tag, value, num_tests)

            renderlist = []
            reward_sum = 0
            player.eps_len = 0
            time.sleep(30)
            player.clear_actions()
            state = player.env.reset()
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()
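Example #22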
    def __init__(self,
                 gpi_type="Linguistic",
                 nof_predicates=51,
                 nof_objects=150,
                 rnn_steps=2,
                 is_train=True,
                 learning_rate=0.0001,
                 learning_rate_steps=1000,
                 learning_rate_decay=0.5,
                 including_object=False,
                 layers=[500, 500, 500],
                 reg_factor=0.0,
                 lr_object_coeff=4):
        """
        Construct module:
        - create input placeholders
        - apply SGP rnn_steps times
        - create labels placeholders
        - create module loss and train_step

        :type gpi_type: "Linguistic", "FeatureAttention", "NeighbourAttention"
        :param nof_predicates: nof predicate labels
        :param nof_objects: nof object labels
        :param rnn_steps: number of time to apply SGP
        :param is_train: whether the module will be used to train or eval
        """
        # save input
        self.learning_rate_decay = learning_rate_decay
        self.learning_rate_steps = learning_rate_steps
        self.learning_rate = learning_rate
        self.nof_predicates = nof_predicates
        self.nof_objects = nof_objects
        self.is_train = is_train
        self.rnn_steps = rnn_steps
        self.embed_size = 300
        self.gpi_type = gpi_type

        self.including_object = including_object
        self.lr_object_coeff = lr_object_coeff
        self.layers = layers
        self.reg_factor = reg_factor
        self.activation_fn = tf.nn.relu
        self.reuse = None
        # logging module
        logger = Logger()

        ##
        # module input
        self.phase_ph = tf.placeholder(tf.bool, name='phase')

        # confidence
        self.confidence_relation_ph = tf.placeholder(
            dtype=tf.float32,
            shape=(None, None, self.nof_predicates),
            name="confidence_relation")
        self.confidence_relation_ph = tf.contrib.layers.dropout(
            self.confidence_relation_ph,
            keep_prob=0.9,
            is_training=self.phase_ph)
        self.confidence_entity_ph = tf.placeholder(dtype=tf.float32,
                                                   shape=(None,
                                                          self.nof_objects),
                                                   name="confidence_entity")
        self.confidence_entity_ph = tf.contrib.layers.dropout(
            self.confidence_entity_ph,
            keep_prob=0.9,
            is_training=self.phase_ph)
        # spatial features
        self.entity_bb_ph = tf.placeholder(dtype=tf.float32,
                                           shape=(None, 14),
                                           name="obj_bb")

        # word embeddings
        self.word_embed_entities_ph = tf.placeholder(dtype=tf.float32,
                                                     shape=(self.nof_objects,
                                                            self.embed_size),
                                                     name="word_embed_objects")
        self.word_embed_relations_ph = tf.placeholder(
            dtype=tf.float32,
            shape=(self.nof_predicates, self.embed_size),
            name="word_embed_predicates")

        # labels
        if self.is_train:
            self.labels_relation_ph = tf.placeholder(
                dtype=tf.float32,
                shape=(None, None, self.nof_predicates),
                name="labels_predicate")
            self.labels_entity_ph = tf.placeholder(dtype=tf.float32,
                                                   shape=(None,
                                                          self.nof_objects),
                                                   name="labels_object")
            self.labels_coeff_loss_ph = tf.placeholder(
                dtype=tf.float32, shape=(None), name="labels_coeff_loss")

        # store all the outputs of the rnn steps
        self.out_confidence_entity_lst = []
        self.out_confidence_relation_lst = []
        # rnn stage module
        confidence_relation = self.confidence_relation_ph
        confidence_entity = self.confidence_entity_ph

        # features msg
        for step in range(self.rnn_steps):
            confidence_relation, confidence_entity_temp = \
                self.sgp(in_confidence_relation=confidence_relation,
                         in_confidence_entity=confidence_entity,
                         scope_name="deep_graph")
            # store the confidence
            self.out_confidence_relation_lst.append(confidence_relation)
            if self.including_object:
                confidence_entity = confidence_entity_temp
                # store the confidence
                self.out_confidence_entity_lst.append(confidence_entity_temp)
            self.reuse = True

        #confidence_entity = confidence_entity_temp
        self.out_confidence_relation = confidence_relation
        self.out_confidence_entity = confidence_entity
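        # turn the final confidences into probabilities (softmax over labels)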
        reshaped_relation_confidence = tf.reshape(confidence_relation,
                                                  (-1, self.nof_predicates))
        self.reshaped_relation_probes = tf.nn.softmax(
            reshaped_relation_confidence)
        self.out_relation_probes = tf.reshape(self.reshaped_relation_probes,
                                              tf.shape(confidence_relation),
                                              name="out_relation_probes")
        self.out_entity_probes = tf.nn.softmax(confidence_entity,
                                               name="out_entity_probes")

        # loss
        if self.is_train:
            # Learning rate
            self.lr_ph = tf.placeholder(dtype=tf.float32,
                                        shape=[],
                                        name="lr_ph")

            self.loss, self.gradients, self.grad_placeholder, self.train_step = self.module_loss(
            )
Example #23
 def __init__(self, bot):
     self.bot = bot
     self.logger = Logger(False)
     self.logger.log(name="Discord", output="Misc Cog Loaded!")
     self.db = DatabaseHandler(db="database.db")
     self.db.connect()
Example #24
def before_all(context):
    Logger().write("Starting Framework >>>>>>>>>>>>>>")
Example #25
 def init_components(self):
     PathProvider.load()
     self.logger = Logger(FileWriter, PathProvider.pathes["LOG"].location)
     self.logger.log_info(App.__app_start_text)
     self.__init_settings()
Example #26
def before_scenario(self, context):
    Logger().write("Starting Scenario.....")
Example #27
File: test.py, Project: hvcl/ColorRL
def test_func(args,
              shared_model,
              env_conf,
              datasets=None,
              tests=None,
              shared_dict=None):
    ptitle('Valid agent')

    if args.valid_gpu < 0:
        gpu_id = args.gpu_ids[-1]
    else:
        gpu_id = args.valid_gpu

    env_conf["env_gpu"] = gpu_id

    if not args.deploy:
        log = {}

        logger = Logger(args.log_dir)

        create_dir(args.log_dir + "models/")
        create_dir(args.log_dir + "tifs/")
        create_dir(args.log_dir + "tifs_test/")

        os.system("cp *.py " + args.log_dir)
        os.system("cp *.sh " + args.log_dir)
        os.system("cp models/*.py " + args.log_dir + "models/")

        setup_logger('{}_log'.format(args.env),
                     r'{0}{1}_log'.format(args.log_dir, args.env))
        log['{}_log'.format(args.env)] = logging.getLogger('{}_log'.format(
            args.env))
        d_args = vars(args)
        env_conf_log = env_conf

    if tests is not None:
        if args.testlbl:
            test_env = EM_env(tests[0],
                              env_conf,
                              type="test",
                              gt_lbl_list=tests[1])
        else:
            test_env = EM_env(tests[0], env_conf, type="test")

    if not args.deploy:
        for k in d_args.keys():
            log['{}_log'.format(args.env)].info('{0}: {1}'.format(
                k, d_args[k]))
        for k in env_conf_log.keys():
            log['{}_log'.format(args.env)].info('{0}: {1}'.format(
                k, env_conf_log[k]))

    torch.manual_seed(args.seed)

    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed)

    raw_list, gt_lbl_list = datasets
    env = EM_env(raw_list, env_conf, type="train", gt_lbl_list=gt_lbl_list)

    reward_sum = 0
    start_time = time.time()
    num_tests = 0
    reward_total_sum = 0

    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    player.model = get_model(args,
                             args.model,
                             env_conf["observation_shape"],
                             args.features,
                             atrous_rates=args.atr_rate,
                             num_actions=2,
                             split=args.data_channel,
                             gpu_id=gpu_id,
                             multi=args.multi)

    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()

    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.model = player.model.cuda()
            player.state = player.state.cuda()
    player.model.eval()

    flag = True
    if not args.deploy:
        create_dir(args.save_model_dir)

    recent_episode_scores = ScalaTracker(100)
    recent_FgBgDice = ScalaTracker(100)
    recent_bestDice = ScalaTracker(100)
    recent_diffFG = ScalaTracker(100)

    recent_MUCov = ScalaTracker(100)
    recent_MWCov = ScalaTracker(100)
    recent_AvgFP = ScalaTracker(100)
    recent_AvgFN = ScalaTracker(100)

    recent_rand_i = ScalaTracker(100)

    renderlist = []
    renderlist.append(player.env.render())
    max_score = 0

    # ----------------------------------------- Deploy / Inference -----------------------------------------
    if args.deploy:
        with torch.cuda.device(gpu_id):
            player.model.load_state_dict(shared_model.state_dict())

        # inference (args, None, player.model, tests [0], test_env, gpu_id, player.env.rng, len (tests [0]))
        if len(tests) == 4:
            inference(args, None, player.model, tests[0], test_env, gpu_id,
                      player.env.rng, len(tests[0]), tests[3])
        else:
            inference(args, None, player.model, tests[0], test_env, gpu_id,
                      player.env.rng, len(tests[0]))

        return
    # ----------------------------------------- End Deploy / Inference -----------------------------------------

    merge_ratios = []
    split_ratios = []

    if args.wctrl == "s2m":
        schedule = args.wctrl_schedule

        delta = (shared_dict['spl_w'] - shared_dict['mer_w']) / (2 *
                                                                 len(schedule))

        mer_w_delta = delta
        mer_w_var = shared_dict['mer_w']
        mer_w_scheduler = Scheduler(mer_w_var, schedule, mer_w_delta)

        split_delta = -delta / len(args.out_radius)
        split_var = shared_dict['spl_w'] / len(args.out_radius)
        spl_w_scheduler = Scheduler(split_var, schedule, split_delta)

    while True:
        if flag:
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.model.load_state_dict(shared_model.state_dict())
            else:
                player.model.load_state_dict(shared_model.state_dict())
            player.model.eval()
            flag = False

        player.action_test()
        reward_sum += player.reward.mean()
        renderlist.append(player.env.render())

        if player.done:
            flag = True
            num_tests += 1

            reward_total_sum += reward_sum
            reward_mean = reward_total_sum / num_tests

            log['{}_log'.format(args.env)].info(
                "VALID: Time {0}, episode reward {1}, num tests {4}, episode length {2}, reward mean {3:.4f}"
                .format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start_time)),
                    reward_sum, player.eps_len, reward_mean, num_tests))

            recent_episode_scores.push(reward_sum)

            if args.save_max and recent_episode_scores.mean() >= max_score:
                max_score = recent_episode_scores.mean()
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = {}
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save,
                            '{0}{1}.dat'.format(args.save_model_dir,
                                                'best_model_' + args.env))

            if num_tests % args.save_period == 0:
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(
                            state_to_save,
                            '{0}{1}.dat'.format(args.save_model_dir,
                                                str(num_tests)))

            if num_tests % args.log_period == 0:
                if tests is not None and not args.DEBUG:
                    inference(args, logger, player.model, tests[0], test_env,
                              gpu_id, player.env.rng, num_tests)

                if (np.max(env.lbl) != 0 and np.max(env.gt_lbl) != 0):
                    bestDice, FgBgDice, diffFG, MWCov, MUCov, AvgFP, AvgFN, rand_i = evaluate(
                        args, player.env)

                    recent_FgBgDice.push(FgBgDice)
                    recent_diffFG.push(abs(diffFG))
                    recent_bestDice.push(bestDice)

                    recent_MWCov.push(MWCov)
                    recent_MUCov.push(MUCov)
                    recent_AvgFP.push(AvgFP)
                    recent_AvgFN.push(AvgFN)

                    recent_rand_i.push(rand_i)

                    log_info = {
                        "bestDice": recent_bestDice.mean(),
                        "FgBgDice": recent_FgBgDice.mean(),
                        "diffFG": recent_diffFG.mean(),
                        "MWCov": recent_MWCov.mean(),
                        "MUCov": recent_MUCov.mean(),
                        "AvgFP": recent_AvgFP.mean(),
                        "AvgFN": recent_AvgFN.mean(),
                        "rand_i": recent_rand_i.mean()
                    }

                    for tag, value in log_info.items():
                        logger.scalar_summary(tag, value, num_tests)
                else:
                    bestDice, FgBgDice, diffFG = 0, 0, 0
                    MWCov, MUCov, AvgFP, AvgFN = 0, 0, 0, 0
                    rand_i = 0

                print(
                    "----------------------VALID SET--------------------------"
                )
                print(args.env)
                print("bestDice:", bestDice, "FgBgDice:", FgBgDice, "diffFG:",
                      diffFG, "MWCov:", MWCov, "MUCov:", MUCov, "AvgFP:",
                      AvgFP, "AvgFN:", AvgFN, "rand_i:", rand_i)
                # print ("mean bestDice")
                print("Log test #:", num_tests)
                print("rewards: ", player.reward.mean())
                print("sum rewards: ", reward_sum)
                print("#gt_values:", len(np.unique(player.env.gt_lbl)))
                print("values:")
                values = player.env.unique()
                print(np.concatenate([values[0][None], values[1][None]], 0))
                print("------------------------------------------------")

                log_img = np.concatenate(renderlist[::-1], 0)

                if not "3D" in args.data:
                    for i in range(3):
                        player.probs.insert(0, np.zeros_like(player.probs[0]))
                    while (len(player.probs) - 3 < args.max_episode_length):
                        player.probs.append(np.zeros_like(player.probs[0]))

                    probslist = [
                        np.repeat(np.expand_dims(prob, -1), 3, -1)
                        for prob in player.probs
                    ]
                    probslist = np.concatenate(probslist, 1)
                    probslist = (probslist * 256).astype(np.uint8, copy=False)
                    # log_img = renderlist [-1]
                    print(probslist.shape, log_img.shape)
                    log_img = np.concatenate([probslist, log_img], 0)

                log_info = {"valid_sample": log_img}

                print(log_img.shape)
                io.imsave(
                    args.log_dir + "tifs/" + str(num_tests) + "_sample.tif",
                    log_img.astype(np.uint8))
                io.imsave(
                    args.log_dir + "tifs/" + str(num_tests) + "_pred.tif",
                    player.env.lbl.astype(np.uint8))
                io.imsave(args.log_dir + "tifs/" + str(num_tests) + "_gt.tif",
                          player.env.gt_lbl.astype(np.int32))

                if args.seg_scale:
                    log_info["scaler"] = player.env.scaler

                for tag, img in log_info.items():
                    img = img[None]
                    logger.image_summary(tag, img, num_tests)

                if not args.deploy:
                    log_info = {
                        'mean_valid_reward': reward_mean,
                        '100_mean_reward': recent_episode_scores.mean(),
                        'split_ratio': player.env.split_ratio_sum.sum() /
                                       np.count_nonzero(player.env.gt_lbl),
                        'merge_ratio': player.env.merge_ratio_sum.sum() /
                                       np.count_nonzero(player.env.gt_lbl),
                    }

                    if args.wctrl == 's2m':
                        log_info.update({
                            'mer_w': mer_w_scheduler.value(),
                            'spl_w': spl_w_scheduler.value() * len(args.out_radius),
                        })

                    merge_ratios.append(player.env.merge_ratio_sum.sum() /
                                        np.count_nonzero(player.env.gt_lbl))
                    split_ratios.append(player.env.split_ratio_sum.sum() /
                                        np.count_nonzero(player.env.gt_lbl))

                    print("split ratio: ", np.max(player.env.split_ratio_sum),
                          np.min(player.env.split_ratio_sum))
                    print("merge ratio: ", np.max(player.env.merge_ratio_sum),
                          np.min(player.env.merge_ratio_sum))

                    print("merge ratio: ", merge_ratios)
                    print("split ratio: ", split_ratios)

                    for tag, value in log_info.items():
                        logger.scalar_summary(tag, value, num_tests)

            renderlist = []
            reward_sum = 0
            player.eps_len = 0

            if args.wctrl == "s2m":
                shared_dict["spl_w"] = spl_w_scheduler.next()
                shared_dict["mer_w"] = mer_w_scheduler.next()
                player.env.config["spl_w"] = shared_dict["spl_w"]
                player.env.config["mer_w"] = shared_dict["mer_w"]

            player.clear_actions()
            state = player.env.reset(player.model, gpu_id)
            renderlist.append(player.env.render())

            time.sleep(15)
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()
Example #28
def before_feature(self, context):
    Logger().write("Starting Feature.....")
Example #29
def train(rank, args, shared_model, optimizer, env_conf, datasets=None):
    """A3C worker: runs one training agent and pushes its gradients to the shared model."""
    ptitle('Training Agent: {}'.format(rank))
    print('Start training agent:', rank)

    # Only the first worker writes scalar summaries.
    if rank == 0:
        logger = Logger(args.log_dir)
        train_step = 0

    # Round-robin GPU assignment across workers; seed each worker differently.
    gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]
    env_conf["env_gpu"] = gpu_id
    torch.manual_seed(args.seed + rank)
    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed + rank)
    if "EM_env" in args.env:
        raw, lbl, prob, gt_lbl = datasets
        env = EM_env (raw, lbl, prob, env_conf, 'train', gt_lbl)
    else:
        env = Voronoi_env (env_conf)

    if optimizer is None:
        if args.optimizer == 'RMSprop':
            optimizer = optim.RMSprop (shared_model.parameters (), lr=args.lr)
        if args.optimizer == 'Adam':
            optimizer = optim.Adam (shared_model.parameters (), lr=args.lr, amsgrad=args.amsgrad)

        # env.seed (args.seed + rank)
    if not args.continuous:
        player = Agent (None, env, args, None)
    else:
        player = Agent_continuous (None, env, args, None)
    player.gpu_id = gpu_id
    if not args.continuous:
        player.model = A3Clstm (env.observation_space.shape, env_conf["num_action"], args.hidden_feat)
    else:
        player.model = A3Clstm_continuous (env.observation_space.shape, env_conf["num_action"], args.hidden_feat)

    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()
    old_score = player.env.old_score
    final_score = 0

    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.state = player.state.cuda()
            player.model = player.model.cuda()
    player.model.train()

    if rank == 0:
        eps_reward = 0
        pinned_eps_reward = 0
        mean_log_prob = 0

    # print ("rank: ", rank)

    while True:
        # Start each rollout by syncing the local model with the latest
        # shared parameters.
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                player.model.load_state_dict(shared_model.state_dict())
        else:
            player.model.load_state_dict(shared_model.state_dict())
        
        if player.done:
            player.eps_len = 0
            if rank == 0:
                if 0 <= (train_step % args.train_log_period) < args.max_episode_length:
                    print("train: step", train_step, "\teps_reward", eps_reward,
                          "\timprovement", final_score - old_score)
                old_score = player.env.old_score
                pinned_eps_reward = eps_reward
                eps_reward = 0
                mean_log_prob = 0
            # New episode: reset the LSTM hidden and cell states.
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.cx = Variable(torch.zeros(1, args.hidden_feat).cuda())
                    player.hx = Variable(torch.zeros(1, args.hidden_feat).cuda())
            else:
                player.cx = Variable(torch.zeros(1, args.hidden_feat))
                player.hx = Variable(torch.zeros(1, args.hidden_feat))
        else:
            # Continuing episode: detach the LSTM state from the previous
            # rollout's graph so gradients do not flow across rollouts.
            player.cx = Variable(player.cx.data)
            player.hx = Variable(player.hx.data)

        # Collect a truncated rollout of at most num_steps transitions.
        for step in range(args.num_steps):
            player.action_train()
            if rank == 0:
                # if 0 <= (train_step % args.train_log_period) < args.max_episode_length:
                #     print ("train: step", train_step, "\taction = ", player.action)
                eps_reward += player.reward
                # print (eps_reward)
                mean_log_prob += player.log_probs[-1] / env_conf["T"]
            if player.done:
                break

        if player.done:
            # if rank == 0:
            #     print ("----------------------------------------------")
            final_score = player.env.old_score
            state = player.env.reset()
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()

        # Bootstrap the return with V(s_T) when the rollout was truncated
        # before the episode ended; otherwise R stays 0.
        R = torch.zeros(1, 1)
        if not player.done:
            if not args.continuous:
                value, _, _ = player.model((Variable(player.state.unsqueeze(0)),
                                            (player.hx, player.cx)))
            else:
                value, _, _, _ = player.model((Variable(player.state.unsqueeze(0)),
                                               (player.hx, player.cx)))
            R = value.data

        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                R = R.cuda()

        player.values.append(Variable(R))
        policy_loss = 0
        value_loss = 0
        gae = torch.zeros(1, 1)
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                gae = gae.cuda()
        R = Variable(R)
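        # The loop below walks the rollout backwards and accumulates the A3C
        # losses: R is the bootstrapped n-step return, the value loss is
        # 0.5 * (R - V(s_i))^2, and the policy gradient uses Generalized
        # Advantage Estimation: delta_i = r_i + gamma * V(s_{i+1}) - V(s_i),
        # gae = gamma * tau * gae + delta_i, plus a 0.01 entropy bonus.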

        for i in reversed(range(len(player.rewards))):
            R = args.gamma * R + player.rewards[i]
            advantage = R - player.values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)

            delta_t = player.values[i + 1].data * args.gamma + player.rewards[i] - \
                        player.values[i].data

            gae = gae * args.gamma * args.tau + delta_t
            # print (player.rewards [i])
            if not args.continuous:
                policy_loss = policy_loss - \
                    player.log_probs[i] * \
                    Variable(gae) - 0.01 * player.entropies[i]
            else:
                policy_loss = policy_loss - \
                    player.log_probs[i].sum () * Variable(gae) - \
                    0.01 * player.entropies[i].sum ()

        player.model.zero_grad()
        sum_loss = policy_loss + value_loss

        # Backpropagate through the local model, copy the gradients onto the
        # shared model, and let the shared optimizer take a step (A3C update).
        sum_loss.backward()
        ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)
        optimizer.step()
        player.clear_actions()

        if rank == 0:
            train_step += 1
            if train_step % args.log_period == 0:
                log_info = {
                    # 'train: sum_loss': sum_loss,
                    'train: value_loss': value_loss,
                    'train: policy_loss': policy_loss,
                    'train: advantage': advantage,
                    # 'train: entropy': entropy,
                    'train: eps reward': pinned_eps_reward,
                    # 'train: mean log prob': mean_log_prob
                }

                for tag, value in log_info.items():
                    logger.scalar_summary(tag, value, train_step)
Example #30
def after_scenario(self, context):
    Logger().write("Ending Scenario.....")