Example #1
def demo():
  """
  Demo.
  """
  print("SprintDataset demo.")
  import sys  # sys, time and numpy are module-level imports in the original file; added here so the excerpt is self-contained
  import time
  import numpy
  from argparse import ArgumentParser
  from Util import progress_bar_with_time
  from Log import log
  from Config import Config
  from Dataset import init_dataset
  arg_parser = ArgumentParser()
  arg_parser.add_argument("--config", help="config with ExternSprintDataset", required=True)
  arg_parser.add_argument("--sprint_cache_dataset", help="kwargs dict for SprintCacheDataset", required=True)
  arg_parser.add_argument("--max_num_seqs", default=sys.maxsize, type=int)
  arg_parser.add_argument("--action", default="compare", help="compare or benchmark")
  args = arg_parser.parse_args()
  log.initialize(verbosity=[4])
  sprint_cache_dataset_kwargs = eval(args.sprint_cache_dataset)
  assert isinstance(sprint_cache_dataset_kwargs, dict)
  sprint_cache_dataset = SprintCacheDataset(**sprint_cache_dataset_kwargs)
  print("SprintCacheDataset: %r" % sprint_cache_dataset)
  config = Config()
  config.load_file(args.config)
  dataset = init_dataset(config.typed_value("train"))
  print("Dataset via config: %r" % dataset)
  assert sprint_cache_dataset.num_inputs == dataset.num_inputs
  assert tuple(sprint_cache_dataset.num_outputs["classes"]) == tuple(dataset.num_outputs["classes"])
  sprint_cache_dataset.init_seq_order(epoch=1)

  if args.action == "compare":
    print("Iterating through dataset...")
    seq_idx = 0
    dataset.init_seq_order(epoch=1)
    while seq_idx < args.max_num_seqs:
      if not dataset.is_less_than_num_seqs(seq_idx):
        break
      dataset.load_seqs(seq_idx, seq_idx + 1)
      tag = dataset.get_tag(seq_idx)
      assert not tag.startswith("seq-"), "dataset does not provide tag-names for seqs"
      dataset_seq = sprint_cache_dataset.get_dataset_seq_for_name(tag)
      data = dataset.get_data(seq_idx, "data")
      targets = dataset.get_data(seq_idx, "classes")
      assert data.shape == dataset_seq.features["data"].shape
      assert targets.shape == dataset_seq.features["classes"].shape
      assert numpy.allclose(data, dataset_seq.features["data"])
      assert numpy.allclose(targets, dataset_seq.features["classes"])
      seq_idx += 1
      progress_bar_with_time(dataset.get_complete_frac(seq_idx))

    print("Finished through dataset. Num seqs: %i" % seq_idx)
    print("SprintCacheDataset has num seqs: %i." % sprint_cache_dataset.num_seqs)

  elif args.action == "benchmark":
    print("Iterating through dataset...")
    start_time = time.time()
    seq_tags = []
    seq_idx = 0
    dataset.init_seq_order(epoch=1)
    while seq_idx < args.max_num_seqs:
      if not dataset.is_less_than_num_seqs(seq_idx):
        break
      dataset.load_seqs(seq_idx, seq_idx + 1)
      tag = dataset.get_tag(seq_idx)
      assert not tag.startswith("seq-"), "dataset does not provide tag-names for seqs"
      seq_tags.append(tag)
      dataset.get_data(seq_idx, "data")
      dataset.get_data(seq_idx, "classes")
      seq_idx += 1
      progress_bar_with_time(dataset.get_complete_frac(seq_idx))
    print("Finished through dataset. Num seqs: %i, time: %f" % (seq_idx, time.time() - start_time))
    print("SprintCacheDataset has num seqs: %i." % sprint_cache_dataset.num_seqs)
    if hasattr(dataset, "exit_handler"):
      dataset.exit_handler()
    else:
      print("No way to stop any background tasks.")
    del dataset

    start_time = time.time()
    print("Iterating through SprintCacheDataset...")
    for i, tag in enumerate(seq_tags):
      sprint_cache_dataset.get_dataset_seq_for_name(tag)
      progress_bar_with_time(float(i) / len(seq_tags))
    print("Finished through SprintCacheDataset. time: %f" % (time.time() - start_time,))

  else:
    raise Exception("invalid action: %r" % args.action)
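A minimal stand-alone sketch of how the --sprint_cache_dataset argument above is consumed: the flag value is a Python dict literal that is eval'd and then passed as keyword arguments to SprintCacheDataset. The keys below are placeholders, not taken from the source.

example_arg = "{'data': {'filename': 'features.cache'}}"  # hypothetical flag value
kwargs = eval(example_arg)
assert isinstance(kwargs, dict)
print(kwargs["data"]["filename"])  # -> features.cache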
Example #2
    def __init__(self, archFile):
        from Config import Config
        self.Cfg = Config(archFile)

        #build regex for data entries
        types = '|'.join(self.Cfg['DataTypes'].keys())
        self.dataEntryRegex = re.compile(
            r"^\s*([^\s:]+)\s*:\s+\.(" + types +
            r")\s+((\s*[^\s,]+\s*,)*\s*[^\s,]+)\s*$")

        #regex of valid operations per instruction type
        operationsPerType = {}
        for instructionTypeName, instructionList in self.Cfg.instructions.items(
        ):
            operationsPerType[instructionTypeName] = "|".join([
                instDesciption['@mnemonic']
                for _, instDesciption in instructionList.items()
            ])

        #construct list of regex for each instructionType
        self.instructionRegexes = {}
        for instructionTypeName, instructionType in self.Cfg['ISA'][
                'instructiontypes'].items():
            self.instructionRegexes[instructionTypeName] = []
            for fieldName, field in instructionType.items():
                if fieldName[0] == '@':
                    continue  #this is an attribute, not a sub_field in the xml config

                fieldTypeName = field['@type']
                fieldType = self.Cfg['ISA']['instructionFieldTypes'][
                    fieldTypeName]

                #opcode is a special field, allowed values are constructed from the instruction names of the various classes
                if fieldTypeName == "mnemonic":
                    self.instructionRegexes[instructionTypeName] += [
                        ('mnemonic',
                         "(" + operationsPerType[instructionTypeName] + ")",
                         "(" + fieldType["@separator"] + "|$)")
                    ]

                #the 'unused' field is not for the parser, skip it
                elif fieldTypeName == "unused":
                    continue

                #add the regex to the list
                else:
                    if '@regex' not in fieldType:
                        raise ValueError('instructionFieldTypes ' +
                                         fieldTypeName +
                                         ' does not specify a regex')
                    self.instructionRegexes[instructionTypeName] += [
                        (fieldName, fieldType['@regex'],
                         "(" + fieldType["@separator"] + "|$)")
                    ]

            #remove trailing separator from last field in instruction
            lastRegex = self.instructionRegexes[instructionTypeName][-1]
            self.instructionRegexes[instructionTypeName][-1] = (lastRegex[0],
                                                                lastRegex[1],
                                                                "$")

        #add regex for label
        self.label_token = r'(\$[^\s;:]+)'
        self.instructionRegexes['label'] = [('target', self.label_token, ""),
                                            (None, '(:)', "$")]

        self.function_label_token = r'def\s+(\w+)\s*:'
        self.instructionRegexes['function_label'] = [
            ('target', self.function_label_token, "")
        ]
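A small illustration of the data-entry pattern built above, with a hard-coded type list standing in for self.Cfg['DataTypes'].keys(); the type names and the sample line are made up:

import re

types = 'word|byte'  # placeholder for the configured data types
dataEntryRegex = re.compile(
    r"^\s*([^\s:]+)\s*:\s+\.(" + types +
    r")\s+((\s*[^\s,]+\s*,)*\s*[^\s,]+)\s*$")
m = dataEntryRegex.match("counter: .word 1, 2, 3")
print(m.groups()[:2])  # -> ('counter', 'word')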
Example #3
import os
import time
import utils
import numpy as np
import pandas as pd
import tensorflow as tf
from LSTM_CRF_PA import LSTM_CRF_PA
from Config import Config

print "preparing data"

con = Config()

(X_char_train,
 y_train), (X_char_dev,
            y_dev), (X_char_test,
                     y_test), (X_char_PA, y_PA) = utils.get_AllData(con.maxlen)

char2id, id2char = utils.loadMap(con.map_dict['char2id'])
label2id, id2label = utils.loadMap(con.map_dict['label2id'])

num_chars = len(id2char.keys())
num_classes = len(id2label.keys())

print 'num of chars:', num_chars
print 'num of classes:', num_classes

#X_char_train, y_train = utils.get_PA_data(con.dataset['traindata'], char2id, label2id)

# merge export and PA train data
X_char_merge_train, y_merge_train = utils.merge_export_and_PA_train_data(
Example #4
"""
The original script was obtained from the chainer-chemistry library (MIT license).
Some of the code was changed.

#TODO: we used GGNN, but conventional models such as CNN may also be used with "adjacency matrix" + "node vector" inputs.
"""
import chainer
from chainer import cuda
from chainer import datasets
import chainer.links as L
import chainer.functions as F

from GGNN import GGNN
from Config import Config

#settings
CF = Config()
CATEGORY_DIM = CF.CATEGORY_DIM
VALUE_DIM = CF.VALUE_DIM

if CF.genre == ["V"]:
    OUT_DIM = 1
else:
    OUT_DIM = VALUE_DIM

#GPU mode
GPUMode = CF.GPUMode

if GPUMode:
    xp = cuda.cupy
    import cupy
    gpu_device = 0
Example #5
        # get image scale
        img_scale = img.shape[0], img.shape[1]

        # print(img.shape)
        img, bbox, label = self.transforms(img, bbox, label)

        # from (h, w, c) to (c, h, w)
        img = torch.tensor(img).permute(2, 0, 1)

        # from bgr to rgb
        img = img[(2, 1, 0), :, :]

        return img, bbox, label, img_id, ignore, img_scale

    def __len__(self):
        return len(self.id_list)


if __name__ == "__main__":
    config = Config("local")
    train_dataset = VOC_dataset(
        config.voc2007_root,
        config.voc2012_root,
        config.voc2007_test_anno,
        trn_val="test",
    )
    # just check everything is fine
    for img, bbox, label in tqdm(train_dataset):
        pass
    print(len(train_dataset))
Example #6
from Directions import Directions
from Models import *

my_logger = logging.getLogger('traffic-logger')
my_logger.setLevel(logging.DEBUG)

handler = logging.FileHandler('logs/traffic-logger.log')
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

handler.setFormatter(formatter)

my_logger.addHandler(handler)

try:
    config = Config().get()

    engine = create_engine(config['sql_connection_string'])
    Session = sessionmaker(bind=engine)

    session = Session()

    api_key = config['google_api_key']

    forward_route_id = 1
    backward_route_id = 2

    forward_directions = Directions(session, api_key, forward_route_id)
    backward_directions = Directions(session, api_key, backward_route_id)

    my_logger.addHandler(handler)
Example #7
import os
import time
import numpy as np
from numpy import random
import pandas as pd
import logging
import pdb
import tempfile
from sqlalchemy import create_engine

from sklearn.preprocessing import MinMaxScaler


from Config import Config

conf = Config()

log = logging.getLogger(__name__)
log.info('%s logger started.', __name__)


def load_data(instrument, train):
  if train:
    data_path = conf.TRAINING_DATA_PATH
    csv_path = os.path.join(data_path, instrument + conf.csv_file)
    if conf.num_of_rows_read > 0:
      return pd.read_csv(csv_path, sep=';', nrows=conf.num_of_rows_read)
    else:
      return pd.read_csv(csv_path, sep=';')
  else:
    data_path = conf.INPUT_PREDICT_DATA_PATH
Example #8
def main(_):

    loader = Loader(flag="azenuz_small")
    config = Config(loader, flag="azenuz_small")
    config.gpu = 1
    if platform.system() == 'Linux':
        gpuid = config.gpu
        os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(gpuid)
        device = '/gpu:' + str(gpuid)
    else:
        device = '/cpu:0'

    lr_updater = LearningRateUpdater(config.learning_rate, config.decay, config.decay_epoch)

    i = 0
    graph = tf.Graph()
    with graph.as_default():
        trainm = CTR_ggnn(config, device, loader, "Train")
        testm = CTR_ggnn(config, device, loader, "Valid")

    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    with tf.Session(graph=graph, config=session_config) as session:
        session.run(tf.global_variables_initializer())
        # CTR_GNN_loader(session, config)
        best_auc = 0.
        best_logloss = 1.
        best_epoch_auc = 0.
        best_epoch_logloss = 0.
        auc = 0.
        for epoch in range(config.epoch_num):
            trainm.update_lr(session, lr_updater.get_lr())
            cost, auc = run(session, config, trainm, loader, verbose=True)
            INFO_LOG("Epoch %d Train AUC %.3f" % (epoch + 1, auc))
            INFO_LOG("Epoch %d Train costs %.3f" %
                     (epoch, cost))
            session.run(tf.local_variables_initializer())

            cost, auc = run(session, config, testm, loader, verbose=True)
            INFO_LOG("Epoch %d Valid AUC %.3f" % (epoch, auc))
            INFO_LOG("Epoch %d Valid cost %.3f" % (epoch, cost))
            # #

            lr_updater.update(auc, epoch)
            if best_auc < auc:
                best_auc = auc
                best_epoch_auc = epoch
                CTR_GNN_saver(session, config, best_auc, best_epoch_auc)

            if best_logloss > cost:
                best_logloss = cost
                best_epoch_logloss = epoch
                # CTR_GNN_saver(session, config, best_epoch_logloss, best_epoch_logloss)


            INFO_LOG("*** best AUC now is %.3f in %d epoch" % (best_auc, best_epoch_auc))
            INFO_LOG("*** best logloss now is %.3f in %d epoch" % (best_logloss, best_epoch_logloss))

            if epoch % 300 == 0 and epoch != 0:
                loader.change_data_list(loader.increase_data_idx())
Example #9
File: Db.py Project: lxy235/lserver
 def __init__(self):
     self.config = Config()
Example #10
"""The driver program; imports other modules, accepts
command-line arguments and calls upon other modules to display a fractal
on-screen and write a PNG image.  This file is the main entry point of the
program."""
import sys
from FractalFactory import FractalFactory
from GradientFactory import GradientFactory
from ImagePainter import ImagePainter
from Config import Config

if __name__ == '__main__':

    if len(sys.argv) < 2:
        configDict = Config().readFile(None)
        ImagePainter(FractalFactory().makeFractal(configDict),
                     GradientFactory().makeGradient(configDict, None),
                     configDict)

    elif len(sys.argv) < 3:
        configDict = Config().readFile(sys.argv[1])
        ImagePainter(FractalFactory().makeFractal(configDict),
                     GradientFactory().makeGradient(configDict, None),
                     configDict)

    else:
        configDict = Config().readFile(sys.argv[1])
        ImagePainter(FractalFactory().makeFractal(configDict),
                     GradientFactory().makeGradient(configDict, sys.argv[2]),
                     configDict)
Example #11
 def __init__(self, stage_table):
     self.db_config = Config().db_config
     #self.insert_query = Config().insert_query
     self.query = SqlExecute(stage_table)
Example #12
 def __init__(self):
     self.config = Config()
     self._srcdir = self.config.src_dir
     self._prefix = self.config.prefix
Example #13
 def reset(self):
     self.config = Config()
Example #14
def main():
    config = Config()
    config.setArgsConfig(parseScriptArguments(sys.argv[1:]))

    if config.argsconfig['task'] is Task.DETERMINEMODEL:
        for filename in listOfModelTestFiles(
                Config().argsconfig['inputDirOrFile']):
            generator = BonnmotionParamsGenerator()
            generator.setModeltestFilename(filename)
            generator.createBonnmotionParamsFiles()
            if config.argsconfig['delete'] == True:
                DataAccess().cleartable(generator.modelname)
            log("starting model-determination of " + generator.modelname)
            md = ModelDeterminationDispatcher(generator.noOfFilesCreated)
            log("done. " + str(generator.noOfFilesCreated) + " hashes saved.")
            md.cleanup()
    elif config.argsconfig['task'] is Task.VALIDATEMODEL:
        md5 = []
        sha1 = []
        result = []

        if config.argsconfig.has_key('arg'):
            DataAccess().get(config.argsconfig['arg'], result, md5,
                             sha1)  #get parameters and hashes from database
            log("starting model-validation of " + config.argsconfig['arg'])
        else:
            DataAccess().get4(result, md5, sha1)

        n = 0
        for x in result:
            f = open(
                os.path.join(
                    Config().readConfigEntry('bonnmotionvalidatepath'),
                    config.readConfigEntry('tempoutputparamsfile').replace(
                        'INDEX', str(n))), 'w')
            f.write(x['bmparamsfile'])
            f.close()
            n += 1

        mv = ModelValidationDispatcher(n, md5, sha1)
        log("done. " + str(n) + " hashes checked.")
    elif config.argsconfig['task'] is Task.DETERMINEAPP:
        for filename in listOfAppTestFiles(
                config.argsconfig['inputDirOrFile']):
            generator = BonnmotionParamsGenerator()
            generator.setApptestFilename(filename)
            params = generator.parseApptestFile()

            if config.argsconfig['delete'] == True:
                DataAccess().cleartable(params['app'])  ##APP-DATATABLE

            if params['inputDirOrFile'] is InputDirOrFile.FILE:
                generator2 = BonnmotionParamsGenerator()
                generator2.setModeltestFilename(params['paramsfile'])
                generator2.createBonnmotionParamsFiles()
                log("starting app determination: " + filename +
                    ", modeltest: " + params['paramsfile'])
                bmd = BonnmotionDispatcher(
                    generator2.noOfFilesCreated,
                    config.readConfigEntry('bonnmotionvalidatepath'))
                add = AppDeterminationDispatcher(generator2.noOfFilesCreated,
                                                 params)
                log("done")
                add.cleanup()
            elif params['inputDirOrFile'] is InputDirOrFile.DIRECTORY:
                for file in os.listdir(params['paramsfile']):
                    if file.endswith('.modeltest'):
                        generator2 = BonnmotionParamsGenerator()
                        generator2.setModeltestFilename(
                            os.path.join(params['paramsfile'], file))
                        generator2.createBonnmotionParamsFiles()
                        log("starting app determination: " + filename +
                            ", modeltest: " + file)
                        bmd = BonnmotionDispatcher(
                            generator2.noOfFilesCreated,
                            config.readConfigEntry('bonnmotionvalidatepath'))
                        add = AppDeterminationDispatcher(
                            generator2.noOfFilesCreated, params)
                        add.cleanup()
                        log("done")
    elif config.argsconfig['task'] is Task.VALIDATEAPP:
        result = []
        if config.argsconfig.has_key('arg'):
            DataAccess().get2(config.argsconfig['arg'],
                              result)  #get parameters and hashes from database
            log("starting app-validation of " + config.argsconfig['arg'])
        else:
            DataAccess().get3(result)

        n = 0
        for x in result:
            f = open(
                os.path.join(
                    Config().readConfigEntry('bonnmotionvalidatepath'),
                    config.readConfigEntry('tempoutputparamsfile').replace(
                        'INDEX', str(n))), 'w')
            f.write(x['bmparamsfile'])
            f.close()
            n += 1
        bmd = BonnmotionDispatcher(
            n, config.readConfigEntry('bonnmotionvalidatepath'))
        AppValidationDispatcher(n, result).cleanup()
        log("done. " + str(n) + " hashes checked.")
Example #15
 def __init__(self):
     self.config = Config()
     self.data = DataLoaders()
Example #16
 def __init__(self, config_filename):
     self.config = Config(config_filename=config_filename)
Example #17
def init_config(config_filename=None,
                command_line_options=(),
                default_config=None,
                extra_updates=None):
    """
  :param str|None config_filename:
  :param list[str]|tuple[str] command_line_options: e.g. ``sys.argv[1:]``
  :param dict[str]|None default_config:
  :param dict[str]|None extra_updates:

  Initializes the global config.
  There are multiple sources which are used to init the config:

    * ``config_filename``, and possibly the first item of ``command_line_options``, interpreted as a config filename
    * other options via ``command_line_options``
    * ``extra_updates``

  Note about the order in which these are applied:

    * ``extra_updates``
    * options from ``command_line_options``
    * ``config_filename``
    * config filename from ``command_line_options[0]``
    * ``extra_updates``
    * options from ``command_line_options``

  ``extra_updates`` and ``command_line_options`` are applied twice so that they are already available
  when the config file is loaded, which thus has access to them and can e.g. use them via Python code.
  However, the purpose is that they overwrite any option from the config file;
  that is why we apply them again at the end.

  ``command_line_options`` is applied after ``extra_updates`` so that the user still has the possibility
  to overwrite anything set by ``extra_updates``.
  """
    global config
    config = Config()

    config_filenames_by_cmd_line = []
    if command_line_options:
        # Assume that the first argument prefixed with "+" or "-" and all following is not a config file.
        i = 0
        for arg in command_line_options:
            if arg[:1] in "-+":
                break
            config_filenames_by_cmd_line.append(arg)
            i += 1
        command_line_options = command_line_options[i:]

    if default_config:
        config.update(default_config)
    if extra_updates:
        config.update(extra_updates)
    if command_line_options:
        config.parse_cmd_args(command_line_options)
    if config_filename:
        config.load_file(config_filename)
    for fn in config_filenames_by_cmd_line:
        config.load_file(fn)
    if extra_updates:
        config.update(extra_updates)
    if command_line_options:
        config.parse_cmd_args(command_line_options)

    # I really don't know where to put this otherwise:
    if config.bool("EnableAutoNumpySharedMemPickling", False):
        import TaskSystem
        TaskSystem.SharedMemNumpyConfig["enabled"] = True
    # Server default options
    if config.value('task', 'train') == 'server':
        config.set('num_inputs', 2)
        config.set('num_outputs', 1)
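A hypothetical call illustrating the application order described in the docstring above; the filenames and options are placeholders, and the exact command-line syntax depends on Config.parse_cmd_args:

init_config(
    config_filename="returnn.config",          # loaded after the defaults below
    command_line_options=["++device", "gpu"],  # re-applied last, so it wins over everything
    default_config={"task": "train"},
    extra_updates={"log_verbosity": [4]},
)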
Example #18
def fetch_remote_list(args, require_attribs=False, recursive=None):
    def _get_filelist_remote(remote_uri, recursive=True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(),
                                  prefix=remote_uri.object(),
                                  recursive=recursive)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/') + 1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = SortedDict(ignore_case=False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][
                    -1] != os.path.sep:
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = {
                }  ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][
                    rem_base_len:]  ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            rem_list[key] = {
                'size': int(object['Size']),
                'timestamp': dateS3toUnix(
                    object['LastModified']
                ),  ## Sadly it's upload time, not our lastmod time :-(
                'md5': object['ETag'][1:-1],
                'object_key': object['Key'],
                'object_uri_str': object_uri_str,
                'base_uri': remote_uri,
            }
            if break_now:
                break
        return rem_list

    cfg = Config()
    remote_uris = []
    remote_list = SortedDict(ignore_case=False)

    if type(args) not in (list, tuple):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    if recursive:
        for uri in remote_uris:
            objectlist = _get_filelist_remote(uri)
            for key in objectlist:
                remote_list[key] = objectlist[key]
    else:
        for uri in remote_uris:
            uri_str = str(uri)
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            if uri_str.find('*') > -1 or uri_str.find('?') > -1:
                first_wildcard = uri_str.find('*')
                first_questionmark = uri_str.find('?')
                if first_questionmark > -1 and first_questionmark < first_wildcard:
                    first_wildcard = first_questionmark
                prefix = uri_str[:first_wildcard]
                rest = uri_str[first_wildcard + 1:]
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = rest.find('/') > -1
                objectlist = _get_filelist_remote(S3Uri(prefix),
                                                  recursive=need_recursion)
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'],
                                            uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = os.path.basename(uri.object())
                if not key:
                    raise ParameterError(
                        u"Expecting S3 URI with a filename or --recursive: %s"
                        % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': unicode(uri),
                    'object_key': uri.object()
                }
                if require_attribs:
                    response = S3(cfg).object_info(uri)
                    remote_item.update({
                        'size':
                        int(response['headers']['content-length']),
                        'md5':
                        response['headers']['etag'].strip('"\''),
                        'timestamp':
                        dateRFC822toUnix(response['headers']['date'])
                    })
                remote_list[key] = remote_item
    return remote_list
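A small stand-alone sketch of the rem_base computation used above, on a plain string instead of an S3 URI (the value is a placeholder; no S3 access involved):

rem_base = "abc/def"  # what remote_uri.object() might return
if rem_base != '' and rem_base[-1] != '/':
    rem_base = rem_base[:rem_base.rfind('/') + 1]
print(rem_base)  # -> 'abc/'; relative keys are then taken as object['Key'][len(rem_base):]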
Example #19
# Check whether a path exists; create it if it does not
def check_path(filename):
    # Split out the directory part of the file path
    file_dir = os.path.split(filename)[0]
    if not os.path.isdir(file_dir):
        os.makedirs(file_dir)
        return 1  # path did not exist; directory has been created
    return 2  # path already exists


'''
Global parameters
'''

# Get the directory of the current file
my_dirpath = cur_file_dir()
# my_log = Logger(os.path.join(dirpath, "./log/service%s.log"%(time.strftime("%Y-%m-%d_%H%M", time.localtime()) )),level='debug')
# Log path
log_path = os.path.join(my_dirpath, "./log/service")
check_path(log_path)
my_log = Logger(log_path, level='debug')
# Load the config
config_path = os.path.join(my_dirpath, "config.ini")
print(config_path)
config = Config(config_path)
my_secret, svc_name, svc_display_name, my_debug, my_version = config.get_config(
)
my_log.logger.info(
    [my_secret, svc_name, svc_display_name, my_debug, my_version])
Example #20
def compare_filelists(src_list, dst_list, src_remote, dst_remote):
    def __direction_str(is_remote):
        return is_remote and "remote" or "local"

    # We don't support local->local sync, use 'rsync' or something like that instead ;-)
    assert (not (src_remote == False and dst_remote == False))

    info(u"Verifying attributes...")
    cfg = Config()
    exists_list = SortedDict(ignore_case=False)

    debug("Comparing filelists (direction: %s -> %s)" %
          (__direction_str(src_remote), __direction_str(dst_remote)))
    debug("src_list.keys: %s" % src_list.keys())
    debug("dst_list.keys: %s" % dst_list.keys())

    for file in src_list.keys():
        debug(u"CHECK: %s" % file)
        if dst_list.has_key(file):
            ## Was --skip-existing requested?
            if cfg.skip_existing:
                debug(u"IGNR: %s (used --skip-existing)" % (file))
                exists_list[file] = src_list[file]
                del (src_list[file])
                ## Remove from destination-list, all that is left there will be deleted
                del (dst_list[file])
                continue

            attribs_match = True
            ## Check size first
            if 'size' in cfg.sync_checks and dst_list[file][
                    'size'] != src_list[file]['size']:
                debug(u"XFER: %s (size mismatch: src=%s dst=%s)" %
                      (file, src_list[file]['size'], dst_list[file]['size']))
                attribs_match = False

            if attribs_match and 'md5' in cfg.sync_checks:
                ## ... same size, check MD5
                try:
                    if src_remote == False and dst_remote == True:
                        src_md5 = hash_file_md5(src_list[file]['full_name'])
                        dst_md5 = dst_list[file]['md5']
                    elif src_remote == True and dst_remote == False:
                        src_md5 = src_list[file]['md5']
                        dst_md5 = hash_file_md5(dst_list[file]['full_name'])
                    elif src_remote == True and dst_remote == True:
                        src_md5 = src_list[file]['md5']
                        dst_md5 = dst_list[file]['md5']
                except (IOError, OSError), e:
                    # MD5 sum verification failed - ignore that file altogether
                    debug(u"IGNR: %s (disappeared)" % (file))
                    warning(u"%s: file disappeared, ignoring." % (file))
                    del (src_list[file])
                    del (dst_list[file])
                    continue

                if src_md5 != dst_md5:
                    ## Checksums are different.
                    attribs_match = False
                    debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" %
                          (file, src_md5, dst_md5))

            if attribs_match:
                ## Remove from source-list, all that is left there will be transferred
                debug(u"IGNR: %s (transfer not needed)" % file)
                exists_list[file] = src_list[file]
                del (src_list[file])

            ## Remove from destination-list, all that is left there will be deleted
            del (dst_list[file])
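A minimal sketch of the attribute checks above, outside of the surrounding sync code; sync_checks and the two dicts are stand-ins for cfg.sync_checks, src_list[file] and dst_list[file]:

sync_checks = ['size', 'md5']
src = {'size': 10, 'md5': 'aa'}
dst = {'size': 10, 'md5': 'bb'}
attribs_match = True
if 'size' in sync_checks and src['size'] != dst['size']:
    attribs_match = False
if attribs_match and 'md5' in sync_checks and src['md5'] != dst['md5']:
    attribs_match = False
print(attribs_match)  # -> False: same size but different md5, so the file would be transferred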
Example #21
import torch
from torch.utils.data import DataLoader
from MyDataSet import MyDataSet
from Tokenizer import Tokenizer
from pad import pad
from Config import Config
from Seq2Seq import Seq2Seq

if __name__ == '__main__':
    source_path = '../data/test/source.txt'
    target_path = '../data/test/target.txt'
    vocab_path = '../data/vocab.txt'
    model_path = '../model/model.pth'
    tokenizer = Tokenizer(vocab_path)
    config = Config()
    fr = open('../result/test.txt', 'w', encoding='utf-8')  # store the prediction results

    loader = DataLoader(dataset=MyDataSet(source_path, target_path, tokenizer),
                        batch_size=config.batch_size,
                        shuffle=True,
                        num_workers=2,
                        collate_fn=pad,
                        drop_last=False)  # do not drop the last (smaller) batch

    if not torch.cuda.is_available():
        print('No cuda is available!')
        exit()
    device = torch.device('cuda:0')
    model = Seq2Seq(config)
    model.to(device)
Example #22
def fetch_local_list(args, recursive=None):
    def _get_filelist_local(local_uri):
        info(u"Compiling list of local files...")
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            filelist = _fswalk(local_path, cfg.follow_symlinks)
            single_file = False
        else:
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [(local_path, [], [deunicodise(local_uri.basename())])]
            single_file = True
        loc_list = SortedDict(ignore_case=False)
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                sr = os.stat_result(os.lstat(full_name))
                loc_list[relative_file] = {
                    'full_name_unicode': unicodise(full_name),
                    'full_name': full_name,
                    'size': sr.st_size,
                    'mtime': sr.st_mtime,
                    ## TODO: Possibly more to save here...
                }
        return loc_list, single_file

    cfg = Config()
    local_uris = []
    local_list = SortedDict(ignore_case=False)
    single_file = False

    if type(args) not in (list, tuple):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError(
                "Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" %
                                 arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(uri)
        local_list.update(list_for_uri)

    ## Single file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. Ie it is False if the URI was of a DIR
    ## and that dir contained only one FILE. That's not
    ## a case of single_file==True.
    if len(local_list) > 1:
        single_file = False

    return local_list, single_file
Example #23
 def __init__(self):
     self.compressorConfig = Config().compressorConfig
     self.imageHandlerConfig = Config().imageHandlerConfig
Example #24
    args = parser.parse_args()

    if not os.path.isdir(args.proj_dir):
        print "!! Please provide a valid directory, given: %s" % (args.proj_dir)
        sys.exit()

    print "Going to take snapshot for project %s" % (args.proj_dir)

    print "Creating output directory at %s" % (args.out_dir)


    Util.cleanup(args.log_file)

    Log.setLogger(args.verbose, args.log_file)

    cfg = Config(args.config_file)

    #1. First, retrieve the 1st commit date from SQL server
    commit_dates = fetchCommitDates(cfg, args.proj_dir, args.lang)

    #2. Snapshot
    dumpSnapShots(args.proj_dir, args.out_dir, int(args.ss_interval_len), commit_dates[0], commit_dates[1])

    project_name = pathLeaf(args.proj_dir)
    ss_dir = os.path.abspath(args.out_dir)
    ss_names = os.listdir(ss_dir + '/' + project_name)
    ss_names.sort()
    ss_paths = [ss_dir + '/' + project_name + '/' + ss_name + '/' for ss_name in ss_names]

    ss_name_to_sha = {}
    for ss_index, ss_path in enumerate(ss_paths):
Example #25
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct  4 16:20:39 2017

@author: Yacalis
"""

import time
from Config import Config

# =============================================================================
# the 'regular' format combines [lat, lon, month, time] into one dimension,
# but the 'detailed' format leaves all of that uncombined
# =============================================================================
if Config().config.use_detailed_data:
    nc_file = '../SPCAM/SPCAM_Stephan/SPCAM_outputs_detailed.nc'
    mean_file = '../SPCAM/SPCAM_Stephan/SPCAM_mean_detailed.nc'
    std_file = '../SPCAM/SPCAM_Stephan/SPCAM_std_detailed.nc'
else:
    nc_file = '../SPCAM/SPCAM_Pierre/SPCAM_outputs_tropics.nc'
    mean_file = '../SPCAM/SPCAM_new_norms/SPCAM_mean.nc'
    std_file = '../SPCAM/SPCAM_new_norms/SPCAM_std.nc'
    max_file = '../SPCAM/SPCAM_new_norms/SPCAM_max.nc'


# =============================================================================
# the logdir name is long, but it beats having to look at the parameter json
# file just to see what the most important values are
# =============================================================================
def get_logdir(config: object) -> str:
Example #26
    :param recv: recipient address(es); separate multiple accounts with commas
    :param title: mail subject
    :param content: mail body
    :param mail_host: mail server
    :param port: port number
    :return:
    '''
    if file:
        msg = MIMEMultipart()

        # build the message body
        part_text = MIMEText(content)
        msg.attach(part_text)  # attach the body to the message

        # build the mail attachment
        part_attach1 = MIMEApplication(open(file, 'rb').read())  # open the attachment
        part_attach1.add_header('Content-Disposition', 'attachment', filename=file)  # name the attachment
        msg.attach(part_attach1)  # add the attachment
    else:
        msg = MIMEText(content)  # mail body only
    msg['Subject'] = title  # mail subject
    msg['From'] = username  # sender account
    msg['To'] = recv  # recipient account list
    smtp = smtplib.SMTP(mail_host, port=port)
    smtp.login(username, passwd)  # log in
    smtp.sendmail(username, recv, msg.as_string())
    smtp.quit()
if __name__ == '__main__':
    from Config.Config import *
    path = Config().case_data_file_path
    send_mail(username='******',passwd='XKXCOUSQLDJJLWRP',recv='*****@*****.**',title='python_test',content='python_test_report',file=path)
Example #27
class Keyframe:
    kfID = 0
    voc = pyfbow.Vocabulary()
    voc.readFromFile(Config().bow)
    kfdb = KeyframeDatabase()

    def __init__(self, frame):
        self.timestamp = frame.timestamp

        self.kfID = Keyframe.kfID
        self.fID = frame.id
        Keyframe.kfID += 1

        self.fp, self.des = frame.fp, frame.des
        self.depth = frame.depth

        self.height, self.width = frame.height, frame.width

        self.pose = frame.pose
        # self.rot = frame.rot
        # self.pos = frame.pos

        self.neighbors = []

        # self.bow = []
        # self.bow_ind = []
        # def compute_bow(self):
        # Extract bow vector from features, and the indices of the words at the 4th lvl of tree
        self.bow, bow_ind = Keyframe.voc.transform2(self.des, 4)
        self.bow_ind = bow_ind.keys()

        Keyframe.kfdb.insert(self)

        self.key_points = dict()
        self.n_kp = 0  # Number of keypoints

        self.covariance = None

    def key_point_initializer(self):
        fx = Config().fx
        fy = Config().fy
        cx = Config().cx
        cy = Config().cy
        for idx in range(len(self.fp)):
            d = self.depth[idx]
            pos = [(self.fp[idx].pt[0] - cx) / fx * d,
                   (self.fp[idx].pt[1] - cy) / fy * d, d]
            self.key_points[idx] = KeyPoint(pos, self.des[idx])
        self.n_kp = len(self.fp)

    def add_key_point(self, idx, kp):  # idx of the feature point, and kp
        if idx not in self.key_points:  # if kp is not in this
            self.key_points[idx] = kp
            self.n_kp += 1
            return True
        else:
            assert kp.id == self.key_points[idx].id  # .get_id()
            return False

    def new_key_point(self, idx, pos):
        if idx not in self.key_points:
            new_kp = KeyPoint(pos, self.des[idx])
            self.key_points[idx] = new_kp
            self.n_kp += 1
            return new_kp
        else:
            return self.key_points[idx]

    def get_key_point(self, idx):
        if idx in self.key_points:
            return self.key_points[idx]
        else:
            return None

    def set_pose(self, rot, trans):
        pose = np.eye(4)
        pose[0:3, 0:3] = rot
        pose[0:3, 3] = trans
        self.pose = pose

    def rot(self):
        # if self.pose:
        #     return Rotation.from_quat(self.pose[3:]).as_matrix()
        return self.pose[0:3, 0:3]
        # return None

    def pos(self):
        # if self.pose:
        #     return np.array(self.pose[0:3])
        return self.pose[0:3, 3]
        # return None

    def pose_matrix(self):
        # r = self.rot()
        # t = self.pos()
        # tt = np.expand_dims(t, axis=1)
        # return np.concatenate((np.concatenate((r, tt), axis=1),
        #                        np.array([0, 0, 0, 1], ndmin=2)), axis=0)
        return self.pose

    def neighbors_list(self):
        return [kf for kf, _, _ in self.neighbors]
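The back-projection in key_point_initializer above follows the pinhole camera model; a stand-alone sketch with made-up intrinsics (fx, fy, cx, cy are placeholders, not values from the actual Config):

fx, fy, cx, cy = 500.0, 500.0, 320.0, 240.0
u, v, d = 400.0, 300.0, 2.0  # pixel coordinates and depth
point = [(u - cx) / fx * d, (v - cy) / fy * d, d]
print(point)  # -> [0.32, 0.24, 2.0]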
Example #28
import gym
import gym_battleships

from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.callbacks import CheckpointCallback, EvalCallback, StopTrainingOnRewardThreshold
from stable_baselines import ACKTR
from Config import Config
from Result import Result

# Inits Battleship gym environments and config
config = Config(5, [3, 2, 2], True, False, False)
env2 = gym.make('Battleships-v0', config=config)
env3 = gym.make('Battleships-v0', config=config)
env = DummyVecEnv([lambda: env2])
env4 = DummyVecEnv([lambda: env3])

# Define Callback
# Callback stops training once the mean reward reaches the threshold (its maximum)
callback_on_best = StopTrainingOnRewardThreshold(
    reward_threshold=env2.calculate_threshold(), verbose=1)
# Callback saves the currently best model
eval_callback = EvalCallback(env4,
                             callback_on_new_best=callback_on_best,
                             verbose=1,
                             best_model_save_path='./ACKTR_Models/best/')
checkpoint_callback = CheckpointCallback(save_freq=1e4,
                                         save_path='./model_checkpoints/')

# Uncomment to train a fresh model; otherwise an already trained model is trained further.
# If a fresh model is trained, it should be trained with the binary reward (Config) first, to reduce repeated
# shots onto the same field.
Example #29
 def registerExtenderCallbacks(self, callbacks):
     self.mCallBacks = callbacks
     self.config = Config(callbacks)
     self.loader = Loader(self.config)
     self.dispatcher = Dispatcher(self.config, self.loader.getPlugins())
Example #30
    def run(self):
        train_itr = 0
        learn_count = 0
        episode_frames = []
        episode_count = 0
        deleted_value = 0
        deleted_age = 0
        deleted_demo = 0

        pre_score = 0
        pre_train_itr = 0
        #
        lock = threading.Lock()
        print(self.name)

        count = 0
        scores, e, replay_full_episode = [], 0, None
        filename = ''

        random.shuffle(self.episodeList)
        epsidoe_list_count = 0
        self.episode = self.episodeList[epsidoe_list_count]
        self.i, self.f = goNextEpisode(self.i, self.f, self.episode)
        episodeEnd = False

        sample_log = openLog(Config.LEARNER_DATA_PATH + 'sampleexp/', '',
                             ['step', 'value', 'age', 'demo', 'qvalue'])
        replay_log = openLog(
            Config.LEARNER_DATA_PATH + 'replaymemory/', '',
            ['step', 'root_priority', 'root_ts', 'root_demo', 'alpha', 'beta'])
        delete_log = openLog(Config.ACTOR_DATA_PATH + 'deletedexp/', '',
                             ['step', 'train_itr', 'value', 'age', 'demo'])
        episode_log = openLog(Config.ACTOR_DATA_PATH + 'episodescore/', '',
                              ['episode', 'score'])
        actor_done, actor_score, actor_n_step_reward, actor_state = False, 0, None, self.env.reset(
        )
        human_done, human_score, human_n_step_reward, human_state = False, 0, None, np.zeros(
            [83, 83, 3], dtype=np.float32)
        episodeEnd = False
        t_q_actor = deque(maxlen=Config.trajectory_n)
        t_q_human = deque(maxlen=Config.trajectory_n)
        episode_count = 0
        train_itr = train_itr + 1
        avg_actor_time_step = 0
        act_itr = 0
        while (learn_count < Config.LEARNER_TRAINING_STEP):

            human_state = process_frame(human_state)
            actor_state = process_frame(actor_state)

            while actor_done is False and episodeEnd is False:
                startTime = time.time()
                if (train_itr % Config.ACTOR_HUMAN_COUNT != 0):
                    action = self.agent.egreedy_action(
                        actor_state)  # e-greedy action for train
                    next_state, reward, actor_done, _ = self.env.step(action)
                    # env.render()
                    episode_frames.append(next_state)
                    next_state = process_frame(next_state)
                    # print(next_state)
                    actor_score += reward
                    reward = sign(reward) * math.log(
                        1 + abs(reward)) if not actor_done else sign(
                            -100) * math.log(1 + abs(-100))
                    reward_to_sub = 0. if len(
                        t_q_actor) < t_q_actor.maxlen else t_q_actor[0][
                            2]  # record the earliest reward for the sub
                    t_q_actor.append([
                        actor_state, action, reward, next_state, actor_done,
                        0.0
                    ])

                    if len(t_q_actor) == t_q_actor.maxlen:
                        if actor_n_step_reward is None:  # only compute once when t_q first filled
                            actor_n_step_reward = sum([
                                t[2] * Config.GAMMA**i
                                for i, t in enumerate(t_q_actor)
                            ])
                        else:
                            actor_n_step_reward = (
                                actor_n_step_reward -
                                reward_to_sub) / Config.GAMMA
                            actor_n_step_reward += reward * Config.GAMMA**(
                                Config.trajectory_n - 1)
                        t_q_actor[0].extend([
                            actor_n_step_reward, next_state, actor_done,
                            t_q_actor.maxlen, self.agent.time_step
                        ])  # actual_n is max_len here
                        self.agent.perceive(
                            t_q_actor[0], self.agent.time_step
                        )  # perceive when a transition is completed
                        # print(demo)
                        # print(t_q[0][3])
                        # print(self.learner.time_step)

                        actor_state = next_state
                if (train_itr % Config.ACTOR_HUMAN_COUNT == 0):
                    startTime = time.time()
                    next_state, reward, human_done, action, episodeEnd = step(
                        self.i, self.f, self.episode)
                    self.i = self.i + 1
                    if (episodeEnd != True):
                        human_score += reward
                        reward = sign(reward) * math.log(
                            1 + abs(reward)) if not human_done else sign(
                                -100) * math.log(1 + abs(-100))
                        reward_to_sub = 0. if len(
                            t_q_human) < t_q_human.maxlen else t_q_human[0][
                                2]  # record the earliest reward for the sub
                        t_q_human.append([
                            human_state, action, reward, next_state,
                            human_done, 1.0
                        ])
                        # print(next_state)
                        if len(t_q_human) == t_q_human.maxlen:
                            if human_n_step_reward is None:  # only compute once when t_q first filled
                                human_n_step_reward = sum([
                                    t[2] * Config.GAMMA**i
                                    for i, t in enumerate(t_q_human)
                                ])
                            else:
                                human_n_step_reward = (
                                    human_n_step_reward -
                                    reward_to_sub) / Config.GAMMA
                                human_n_step_reward += reward * Config.GAMMA**(
                                    Config.trajectory_n - 1)

                            t_q_human[0].extend([
                                human_n_step_reward, next_state, human_done,
                                t_q_human.maxlen, self.agent.time_step
                            ])  # actual_n is max_len here
                            self.agent.perceive(
                                t_q_human[0], self.agent.time_step
                            )  # perceive when a transition is completed
                        human_state = next_state
                train_itr = train_itr + 1
                if self.agent.replay_memory.full():
                    self.agent.train_Q_network(
                        update=False)  # train along with generation
                    learn_count += 1
                    if (train_itr % Config.LEARNER_TRAINING_PART == 0):
                        self.agent.save_model()
                        sample_demo = float(self.agent.demo_num) / (
                            Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)
                        sample_value = math.pow(
                            self.agent.sum_abs_error /
                            (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE),
                            0.4)
                        sample_age = self.agent.sum_age / (
                            Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)
                        sample_q = self.agent.qvalue / (
                            Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)
                        print("learner_sample")
                        print(sample_value)
                        print(sample_age)
                        print(sample_demo)
                        print(sample_q)

                        sum_sample_q = 0
                        for i in range(6):
                            sum_sample_q += sample_q[i]
                        print(sum_sample_q)
                        self.agent.sum_abs_error = 0
                        self.agent.demo_num = 0
                        self.agent.sum_age = 0
                        self.agent.qvalue = 0
                        print("replay_memory")
                        print(self.agent.replay_memory.tree.total_p)
                        writeLog(Config.LEARNER_DATA_PATH + 'sampleexp/',
                                 sample_log, [
                                     str(train_itr),
                                     str(sample_value),
                                     str(sample_age),
                                     str(sample_demo),
                                     str(sum_sample_q)
                                 ])
                        writeLog(
                            Config.LEARNER_DATA_PATH + 'replaymemory/',
                            replay_log, [
                                str(train_itr),
                                str(self.agent.replay_memory.tree.total_p),
                                str(self.agent.replay_memory.tree.total_ts),
                                str(self.agent.replay_memory.tree.total_d),
                                str(self.agent.replay_memory.tree.alpha),
                                str(self.agent.replay_memory.tree.beta)
                            ])
                        gc.collect()
                    replay_full_episode = replay_full_episode or e
                if learn_count % Config().UPDATE_TARGET_NET == 0:
                    # print("actor_update_target"+str(train_itr))
                    self.agent.sess.run(self.agent.update_target_net)
                if (train_itr % 100 == 0):
                    print("process time : " + str(time.time() - startTime) +
                          "/" +
                          str(self.agent.replay_memory.tree.data_pointer))

                if (train_itr % Config.ACTOR_ACTING_PART == 0):
                    sum_value = self.agent.replay_memory.tree.avg_val / Config.ACTOR_ACTING_PART
                    sum_age = self.agent.replay_memory.tree.avg_time / Config.ACTOR_ACTING_PART
                    sum_demo = self.agent.replay_memory.tree.avg_demo / Config.ACTOR_ACTING_PART
                    print("actor_deleted")
                    print(sum_value)
                    print(sum_age)
                    print(sum_demo)
                    writeLog(Config.ACTOR_DATA_PATH + 'deletedexp/',
                             delete_log, [
                                 str(count),
                                 str(train_itr),
                                 str(sum_value),
                                 str(sum_age),
                                 str(sum_demo)
                             ])
                    self.agent.replay_memory.tree.avg_val = 0
                    self.agent.replay_memory.tree.avg_time = 0
                    self.agent.replay_memory.tree.avg_demo = 0
            if actor_done:
                # handle transitions left in t_q

                t_q_actor.popleft()  # first transition's n-step is already set
                transitions = set_n_step(t_q_actor, Config.trajectory_n,
                                         self.agent.time_step)

                for t in transitions:
                    self.agent.perceive(t, self.agent.time_step)
                if self.agent.replay_memory.full():
                    delta = actor_score - pre_score
                    actor_num = 1
                    sub_train_itr = learn_count - pre_train_itr
                    # print(sub_train_itr)
                    self.agent.replay_memory.update_alpha_and_beta(
                        delta, actor_num, sub_train_itr)
                    pre_train_itr = learn_count
                    pre_score = actor_score
                    # scores.append(score)
                if replay_full_episode is not None:
                    print(
                        "episode: {}  trained-episode: {}  score: {}  memory length: {}  epsilon: {}"
                        .format(e, e - replay_full_episode, actor_score,
                                len(self.agent.replay_memory),
                                self.agent.epsilon))
                    writeLog(Config.ACTOR_DATA_PATH + 'episodescore/',
                             episode_log,
                             [str(episode_count),
                              str(actor_score)])

                # Periodically save a gif of the episode, along with model parameters and summary statistics.
                #if episode_count % Config.GIF_STEP == 0 and episode_count != 0 :
                #    time_per_step = 0.05
                #    images = np.array(episode_frames)
                #    make_gif(images, './frames/dqfd_image' + str(episode_count) + '.gif',
                #             duration=len(images) * time_per_step, true_image=True, salience=False)
                actor_done, actor_score, actor_n_step_reward, actor_state = False, 0, None, self.env.reset(
                )
                t_q_actor = deque(maxlen=Config.trajectory_n)
                episode_count = episode_count + 1
                episode_frames = []

            if (episodeEnd):
                # handle transitions left in t_q

                print("human : episode end")
                t_q_human.popleft()  # first transition's n-step is already set
                transitions = set_n_step(t_q_human, Config.trajectory_n,
                                         self.agent.time_step)
                for t in transitions:
                    self.agent.perceive(t, self.agent.time_step)
                if self.agent.replay_memory.full():
                    if train_itr % Config().UPDATE_TARGET_NET == 0:
                        #print("human_update_target")
                        self.agent.sess.run(self.agent.update_target_net)
                epsidoe_list_count += 1
                if (epsidoe_list_count == self.episodeList.__len__()):
                    random.shuffle(self.episodeList)
                    epsidoe_list_count = 0
                    self.episode = self.episodeList[epsidoe_list_count]
                else:
                    self.episode = self.episodeList[epsidoe_list_count]
                self.i, self.f = goNextEpisode(self.i, self.f, self.episode)
                human_done, human_score, human_n_step_reward, human_state = False, 0, None, np.zeros(
                    [83, 83, 3], dtype=np.float32)
                t_q_human = deque(maxlen=Config.trajectory_n)
                episodeEnd = False

            e += 1
        print("actor end")