Example #1
    def __init__(self, model, config, criterion, optimizer, dataset, model_dir):
        """Initializes a BaseTrainer object
        
        Arguments:
            model {torch.nn.Module} -- PyTorch neural network model
            config {Configuration} -- Configuration object of settings.
            criterion {nn.loss} -- Loss function to optimize.
            optimizer {torch.optim} -- Optimizer object
            dataset {torch.utils.data.Dataset} -- A dataset object to use with dataloaders
            model_dir {str} -- Directory path to the model
        """

        self.config     = config

        self.model      = model
        self.dataset    = dataset
        self.criterion  = criterion
        self.optimizer  = optimizer

        self.loss       = []
        self.parameters = 3

        self.save_path  = os.path.join(model_dir, "states")

        self.tensorboard = utils.str_to_bool(self.config.tensorboard)

        self.cuda       = utils.str_to_bool(self.config.cuda)
        self.gpu        = str(self.config.gpu)
        self.verbose    = utils.str_to_bool(self.config.verbose)
        self.validation = utils.str_to_bool(self.config.validation)
        self.clip       = self.config.clip
        self.mode       = self.config.mode

        self.batch_size     = self.config.params.batch_size
        self.n_epochs       = self.config.params.n_epochs
        self.train_fraction = self.config.params.train_fraction

        
        self.epochs = range(1, self.n_epochs+1)

        self.train_loader, self.val_loader = self._split_data(self.dataset, self.validation, self.train_fraction)

        # If logging with TensorBoard, start from a clean log directory:
        # create it if missing, remove it if it already exists
        # (SummaryWriter recreates the directory when instantiated).
        if self.tensorboard:
            tensorboard_dir = os.path.join(model_dir, "logs/")

            if not os.path.exists(tensorboard_dir):
                os.makedirs(tensorboard_dir)
            else:
                try:
                    shutil.rmtree(tensorboard_dir)
                except OSError as e:
                    print("Error: %s - %s." % (e.filename, e.strerror))



            self.writer = SummaryWriter(tensorboard_dir)
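For context, a minimal sketch of how this constructor might be wired. The Configuration class is not shown above, so it is stubbed here with SimpleNamespace, and the flag fields are strings because they pass through utils.str_to_bool; the rest of the class (e.g. _split_data) is assumed to be defined.

from types import SimpleNamespace

import torch
from torch import nn

# Stub config; field names mirror the attributes read in __init__ above.
config = SimpleNamespace(
    tensorboard="false", cuda="false", gpu="0", verbose="true",
    validation="true", clip=1.0, mode="train",
    params=SimpleNamespace(batch_size=32, n_epochs=10, train_fraction=0.8),
)

model = nn.Linear(8, 1)
dataset = torch.utils.data.TensorDataset(torch.randn(100, 8), torch.randn(100, 1))
trainer = BaseTrainer(model, config, nn.MSELoss(),
                      torch.optim.SGD(model.parameters(), lr=1e-3),
                      dataset, model_dir="/tmp/example_model")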
Example #2
 def get_dnd(self):
     tree = self.xsi_get("services/donotdisturb")
     active, splash = [
         utils.node_value(tree, x) for x in ("active", "ringSplash")
     ]
     return {
         "active": utils.str_to_bool(active),
         "ringSplash": utils.str_to_bool(splash)
     }
Example #3
def event_csv_loader(filestr):
    with open(filestr, "r") as csvfile:
        mydict = {}
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)
        for row in reader:
            mydict[row[0]] = {
                "event_type": row[2].strip(),
                "actor_type": row[1].strip(),
                "can_aoe": str_to_bool(row[3].strip()),
                "panic_blocked": str_to_bool(row[4].strip())
            }
        return mydict
Example #4
 def get_call_forwarding(self, forward_type="always"):
     """Get call forwarding status for given forward type.
     Returns dict with keys 'active' (bool) and 'number' (str)
     """
     tree = self.xsi_get("services/callforwarding%s" % forward_type)
     active, number = [
         utils.node_value(tree, x)
         for x in ("active", "forwardToPhoneNumber")
     ]
     res = {"active": utils.str_to_bool(active), "number": number or ""}
     if forward_type == "always":
         splash = utils.node_value(tree, "ringSplash")
         res["ringSplash"] = utils.str_to_bool(splash)
     return res
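Hypothetical usage, illustrating the documented return shape; the instance name xsi and the values are for illustration only.

status = xsi.get_call_forwarding("busy")
# e.g. {"active": False, "number": ""}
status = xsi.get_call_forwarding()
# the default "always" type additionally reports ring splash, e.g.
# {"active": True, "number": "5551234", "ringSplash": False}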
Example #5
    def __init__(self):
      
        mishmash_config_reader = MishmashConfigReader(MishmashGrpcConfig.CONFIG_FILE_PATHS,
                                                      MishmashGrpcConfig.ENV_VARIABLE_NAMES,
                                                      MishmashGrpcConfig.NOT_REQUIRED_VARIABLE_NAMES)
                                                      
        self.__raw_config = mishmash_config_reader.get_configuration()

        if not isinstance(self.__raw_config["MISHMASHIO_AUTH_METHOD"], str):
            raise MishmashWrongCredentialsException("please provide valid MISHMASHIO_AUTH_METHOD")
        
        self.__auth_method = self.__raw_config["MISHMASHIO_AUTH_METHOD"]
        self.__is_ssl = str_to_bool(self.__raw_config["MISHMASHIO_USE_SSL"])

        if self.__is_ssl:
            if "MISHMASH_CERT_FILE_PATH" not in self.__raw_config:
                raise MishmashWrongCredentialsException("please provide valid MISHMASH_CERT_FILE_PATH")

            self.__trusted_certs = load_credential_from_file(self.__raw_config["MISHMASH_CERT_FILE_PATH"])


        self.__server_address_list = parse_server_list(self.__raw_config["MISHMASHIO_SERVERS"], 
                                                    self.__is_ssl, 
                                                    MishmashGrpcConfig.DEFAULT_SSL_PORT, 
                                                    MishmashGrpcConfig.DEFAULT_PORT )

        self.__auth_plugin = self.__get_authenticaiton_plugin()
Example #6
File: core.py Project: AKJ/sage
def set_config(command, param):
	available_commands = {
		'newline_on_ga': 'bool',
		'health_sip_percentage': 'int',
		'eat_lestagii': 'bool',
		'basic_lestagii_health_percentage': 'int',
		'combat_lestagii_health_percentage': 'int',
		'basic_lestagii_mana_percentage': 'int',
		'combat_lestagii_mana_percentage': 'int'
	}

	if command in available_commands:
		if available_commands[command] == 'bool':
			param = utils.str_to_bool(param)
		elif available_commands[command] == 'int':
			try:
				param = int(param)
			except ValueError:
				echo('Option "' + command + '" expects a numeric value')
				return False

		config[command] = param
		echo('Config option ' + command + ' updated')
	else:
		echo("Invalid configuration option")
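Hypothetical calls showing both coercion paths; the option names come from available_commands above, and echo/config are assumed to come from the surrounding module.

set_config('newline_on_ga', 'true')          # 'bool' option, coerced via utils.str_to_bool
set_config('health_sip_percentage', '75')    # 'int' option, stored as 75
set_config('health_sip_percentage', 'lots')  # echoes an error and returns False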
Example #7
	def on_key_pressed(self, key):
		if str_to_bool(self.db.get('IS_HUNG_UP')):
			return { 'ok' : False, 'error' : "Phone is hung up" }

		# play DTMF tone
		command = { "press" : self.map_key_to_tone(key) }
		
		mode = int(self.db.get('MODE'))
		logging.info("CURRENT MODE: %d" % mode)

		if mode == GATHER_MODE:
			logging.info("THIS IS A GATHER: %d" % key)

			try:
				gathered_keys = json.loads(self.db.get('gathered_keys'))
				print gathered_keys
			except Exception as e:
				logging.warning("could not get gathered_keys: ")
				print e, type(e)

				gathered_keys = []

			gathered_keys += [key]
			self.db.set('gathered_keys', json.dumps(gathered_keys))

		return self.send_command(command)
Example #8
def index_vnf(fields, filename, final_path, new_version):
    """
    Indexing function for VNFs
    """

    user = get_user()
    fields['user'] = user
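    # Note: the fallback 1 below is an int, so str_to_bool must tolerate
    # non-string input when the form omits the 'visibility' field.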
    fields['visibility'] = str_to_bool(request.form.get('visibility', 1))
    data_ind = {'name': fields['name'], 'description': fields['description'], 'vendor': fields['vendor'],
                'path': fields['path']}
    copyfile(filename,
             final_path + '/' + fields.get('id') + "-" + fields.get('version') + '.tar.gz')
    yaml.dump(fields, open(final_path + '/' + 'metadata.yaml', 'w'))
    index = yaml.load(open('/repository/index.yaml'), Loader=yaml.FullLoader)

    if new_version:
        index['vnf_packages'][fields.get('id')][fields.get('version')] = data_ind
        if version.parse(index['vnf_packages'][fields.get('id')]['latest']) < version.parse(
                fields.get('version')):
            index['vnf_packages'][fields.get('id')]['latest'] = fields.get('version')
    else:
        index['vnf_packages'][fields.get('id')] = {fields.get('version'): data_ind}
        index['vnf_packages'][fields.get('id')]['latest'] = fields.get('version')
        if not fields['visibility']:
            private_packages = dbclient['private_packages']['vnf']
            private_packages.insert_one({'id': fields.get('id'), 'user': user})
        dependencies = dbclient['dependencies']['vnf']
        dependencies.insert_one({'id': fields.get('id'), 'images': fields.get('images')})

    yaml.dump(index, open('/repository/' + 'index.yaml', 'w'))
Example #9
    def get(self):
        custom_filter = SaleList.parser.parse_args()
        response = []

        if custom_filter.get('deleted'):
            sales = SaleModel.filter_by_deleted(str_to_bool(custom_filter.get('deleted')))
        else:
            sales = SaleModel.find_all()

        for sale in sales:
            products_from_sale = SoldProductModel.find_by_sale_id(sale.sale_id)
            # payments_from_sale = TransactionModel.find_by_sale_id(sale.sale_id)

            sale_element = {"sale_id": sale.sale_id,
                            "date": str(sale.date)[:19],
                            "client": sale.client.json(),
                            "total": sale.total,
                            "discount": sale.discount,
                            "user_commission": sale.user_commission,
                            "promoter_commission": sale.promoter_commission,
                            "deleted": sale.deleted,
                            # "payments": [payment.json() for payment in payments_from_sale],
                            "products": [product.json() for product in products_from_sale]}

            if sale.promoter:
                sale_element["promoter"] = sale.promoter.json()
            if sale.user:
                sale_element["seller"] = sale.user.json()

            response.append(sale_element)

        return response
Example #10
def test_str_to_bool():
    assert str_to_bool('true') == True
    assert str_to_bool('false') == False
    assert str_to_bool('TRUE') == True
    assert str_to_bool('FALSE') == False
    assert str_to_bool('FAIL') is None
    assert str_to_bool('Fail', True) == True
    assert str_to_bool(None, True) == True
    assert str_to_bool('Fail', 'Oh!') == 'Oh!'
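These assertions pin down the contract: case-insensitive 'true'/'false' map to booleans, and anything else, including None, falls back to a default that is itself None unless supplied. A minimal sketch consistent with them (the real utils.str_to_bool may differ in detail):

def str_to_bool(value, default=None):
    # Only the strings 'true'/'false' (any casing) are recognized;
    # every other input, including None, returns the fallback default.
    if isinstance(value, str):
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    return default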
Example #11
    def get(self):
        custom_filter = TransactionList.parser.parse_args()

        if custom_filter.get('deleted'):
            return [transaction.json() for transaction
                    in TransactionModel.filter_by_deleted(str_to_bool(custom_filter.get('deleted')))]
        else:
            return [transaction.json() for transaction in TransactionModel.find_all()]
Example #12
 def ResolveVarToBool(self, in_var, default=False):
     retVal = default
     try:
         resolved_var = self.ResolveVarToStr(in_var)
         retVal = utils.str_to_bool(resolved_var, default)
     except:
         pass
     return retVal
Example #13
 def get_remote_office(self):
     """Get Remote Office call setting
     """
     tree = self.xsi_get("services/remoteoffice")
     active, number = [
         utils.node_value(tree, x) for x in ("active", "remoteOfficeNumber")
     ]
     return {"active": utils.str_to_bool(active), "number": number or ""}
Example #14
	def on_pick_up(self):
		logging.info("picking up")

		self.reset_for_call()

		p = Process(target=self.run_script)
		p.start()

		return { 'ok' : not str_to_bool(self.db.get('IS_HUNG_UP')) }
Example #15
 def get_broadworks_anywhere(self):
     """Get BroadWorks Anywhere call setting.
     """
     tree = self.xsi_get("services/broadworksanywhere")
     dial, paging = [
         utils.node_value(tree, x)
         for x in ("alertAllLocationsForClickToDialCalls",
                   "alertAllLocationsForGroupPagingCalls")
     ]
     return {
         "alertAllLocationsForClickToDialCalls":
         utils.str_to_bool(dial),
         "alertAllLocationsForGroupPagingCalls":
         utils.str_to_bool(paging),
         "locations":
         map(lambda elem: utils.node_list(elem),
             tree.findall(utils.ns_escape("locations/location")))
     }
Example #16
    def __init__(self, **kwargs):
        """
    Constructs a Fabric object
    """
        # implementations must respect this
        self.partial_cables = utils.str_to_bool(
            kwargs.get('partial_cables', '0'))

        self._routers = {}  # radix->[router, count]
        self._cables = {}  # actual_length->[cable,count]

        self._cable_granularity = utils.meters(
            kwargs.get('cable_granularity', '0.5m'))
Example #17
 def set_options_config(self, options):
     self.BACKLOG_ENABLED = str_to_bool(options["BACKLOG_ENABLED"])
     self.WEEKLY_ARCHIVE = str_to_bool(options["WEEKLY_ARCHIVE"])
     self.DAILY_RESET = str_to_bool(options["DAILY_RESET"])
     self.UPDATE_DURATION = str_to_bool(options["UPDATE_DURATION"])
     self.WEEKLY_BURN_CHART = str_to_bool(options["WEEKLY_BURN_CHART"])
     self.STORY_POINTS_ENABLED = str_to_bool(
         options["STORY_POINTS_ENABLED"])
     self.BURN_CHART_TYPE = options["BURN_CHART_TYPE"]
Example #18
    def get_broadworks_mobility(self):
        """Get BroadWorks Mobility call setting.

        Returns active state and primary mobile number only.
        """
        tree = self.xsi_get("services/broadworksmobility")
        active = utils.node_value(tree, "active")
        # xpath looks more complex because we can't specify a default
        # namespace (without prefix) in xpath function arguments
        number = tree.xpath("/*[name()='BroadWorksMobility']"
                            "/*[name()='mobileIdentity']"
                            "/*[name()='primary'][text() = 'true']"
                            "/parent::*"
                            "/*[name()='mobileNumber']/text()")
        phone = number[0] if number else ""
        return {"active": utils.str_to_bool(active), "mobileNumber": phone}
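For comparison, a sketch of the same lookup using an explicit prefix-to-namespace mapping, the usual lxml workaround for documents with a default namespace; the XSI namespace URI below is an assumption for illustration.

XSI_NS = {"x": "http://schema.broadsoft.com/xsi"}  # assumed namespace URI
number = tree.xpath(
    "/x:BroadWorksMobility/x:mobileIdentity[x:primary = 'true']"
    "/x:mobileNumber/text()",
    namespaces=XSI_NS,
)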
Example #19
 def get_simultaneous_ring(self, **kwargs):
     """Get Simultaneous Ring call setting.
     """
     tree = self.xsi_get("services/simultaneousringpersonal")
     active, ring = [
         utils.node_value(tree, x) for x in ("active", "incomingCalls")
     ]
     return {
         "active":
         utils.str_to_bool(active),
         "incomingCalls": (ring == "Do not Ring if on a Call"),
         "simRingLocations":
         map(
             lambda elem: utils.node_list(elem),
             tree.findall(
                 utils.ns_escape("simRingLocations/simRingLocation")))
     }
Example #20
	def on_hang_up(self):
		logging.info("hanging up")
		self.send_command({ 'stop_audio' : True })
		stop_daemon(self.conf['d_files']['module'])
		self.db.set('IS_HUNG_UP', True)

		try:
			current_record_pid = self.db.get("CURRENT_RECORD_PID")
			if current_record_pid is not None:
				logging.debug("current_record_pid: %s" % current_record_pid)
				os.kill(int(current_record_pid), signal.SIGKILL)
				self.db.delete("CURRENT_RECORD_PID")
			else:
				logging.debug("not currently recording")

		except Exception as e:
			logging.warning("NO CURRENT RECORD TO KILL")
			print e, type(e)

		return { 'ok' : str_to_bool(self.db.get('IS_HUNG_UP')) }
Example #21
def delete_nsd(nsdId):
    """
    Network Service Delete on OSM and repository
    """
    try:
        logger.info(request)
        all = str_to_bool(request.form.get('all', 0))
        user = get_user()
        index = yaml.load(open('/repository/index.yaml'), Loader=yaml.FullLoader)
        ns = index.get('ns_packages').get(nsdId, {})
        if not len(ns):
            raise Exception('Network Service not found')
        latest = ns['latest']
        ns_metadata = yaml.load(open('/repository/ns/' + nsdId + '/' + latest + '/metadata.yaml'),
                                Loader=yaml.FullLoader)
        if user != ns_metadata['user']:
            raise Exception('User {} did not upload this Network Service and has no'
                            ' permission to delete it'.format(user))

        ns_id = dbclient['onboarded']['ns'].find_one({'ns': nsdId})
        if ns_id:
            nbiUtil.delete_nsd(ns_id.get('nsid'))

        logger.info('NS {} deleted in OSM'.format(nsdId))
        if all or len(ns) == 2:
            del index['ns_packages'][nsdId]
            rmtree('/repository/ns/' + nsdId)

        else:
            del index['ns_packages'][nsdId][latest]
            rmtree('/repository/ns/' + nsdId + '/' + latest)

        yaml.dump(index, open('/repository/' + 'index.yaml', 'w'))
        dbclient['onboarded']['ns'].delete_one({'ns': nsdId})
        dbclient['private_packages']['nsd'].delete_one({'id': nsdId})
        return jsonify({}), 204
    except Exception as e:
        return jsonify({"detail": str(e), "code": type(e).__name__, "status": 400}), 400
Example #22
    def op_size(self, op, is_right_click=False):
        """ Change resolution and screen density """
        serial = self.serial
        size_str = ''
        density_str = ''

        # Restore original
        if op == OP.SIZE_ORIGINAL:
            size_arr = util_atool.get_resolution(serial)
            density_arr = util_atool.get_density(serial)
            size_str = size_arr[0]
            density_str = density_arr[0]

            # Right-click only queries the current values
            if is_right_click:
                s = "分辨率信息为:\n宽x高:{0},DPI:{1}".format(size_str, density_str)
                s += "\nsize:{0},density:{1}".format(size_arr, density_arr)
                utils.showinfo(s)
                return
            else:
                s = "分辨率 还原,size:{0}\tdensity:{1}".format(
                    size_arr, density_arr)
                print(s)
                self.msg(s)

        # Proportional scaling
        elif op == OP.SIZE_SCALE:
            scale = 3 if is_right_click else 2

            size_arr = util_atool.get_resolution(serial)
            s = size_arr[0]
            arr = s.split("x")
            w = int(arr[0])
            h = int(arr[1])
            # density_str = density_arr[0]
            size_str = "{0}x{1}".format(w * scale, h * scale)

            ps = "分辨率 等比缩放{0}倍, 仅更改宽x高".format(scale)
            ps += "\nsize:{0}\t{1}".format(size_arr, size_str)
            print(ps)
            self.msg(ps)

        # Custom
        elif op == OP.SIZE_CUSTOM:
            jf = setting.read_setting()
            if utils.str_to_bool(jf["size_on"]):
                size_str = jf["size"]
            if utils.str_to_bool(jf["density_on"]):
                density_str = jf['density']

            if is_right_click:
                arr = [
                    "分辨率 自定义配置信息:",
                    "\n宽x高:{0}, 是否启用:{1}".format(jf["size"], jf["size_on"]),
                    "\nDPI:{0}, 是否启用:{1}".format(jf["density"],
                                                 jf["density_on"]),
                ]
                utils.showinfo("".join(arr))
                return
            else:
                s = "设置为 自定义分辨率,size:{0},density:{1}".format(
                    size_str, density_str)
                print(s)
                self.msg(s)

        if size_str:
            self.th(util_atool.set_resolution, (serial, size_str, density_str))
Example #23
#!/usr/bin/python2
""" DATABASE """

__all__ = ['database']

# Import required python libraries
import os, sys
import sqlite3
from utils import sprint, eprint, str_to_bool

count = 0
folder = '/var/sqlite'
file = os.getenv('DB_FILE')
path = os.path.join(folder, file)
in_memory = str_to_bool(os.getenv('DB_MEMORY'))
if in_memory:
    database = ":memory:"
else:
    database = path
save_on_exit = str_to_bool(os.getenv('DB_SAVE'))
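# Hypothetical environment for an in-memory database that is saved on exit
# (values for illustration only):
#   DB_FILE=devices.db DB_MEMORY=true DB_SAVE=true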

# Init database
if not os.path.exists(folder):
    os.makedirs(folder)
try:
    with sqlite3.connect(database) as sql:
        sql.execute(
            "CREATE TABLE IF NOT EXISTS queue(ip TEXT PRIMARY KEY, lock BOOL DEFAULT 0, hits INTEGER DEFAULT 0, date DATETIME);"
        )
        sql.execute(
            "CREATE TABLE IF NOT EXISTS auth(ip TEXT PRIMARY KEY, snmp_version TEXT, snmp_community TEXT, snmp_aprot TEXT, snmp_apass TEXT, snmp_seng_id TEXT, snmp_ceng_id TEXT, snmp_level TEXT, snmp_context TEXT, snmp_user TEXT, snmp_pprot TEXT, snmp_ppass TEXT, snmp_boots TEXT);"
Example #24
    def __init__(self, **kwargs):
        super(Hyperx2DStripe, self).__init__(**kwargs)

        # mandatory
        self._concentration = None
        self._widths = None
        self._weights = None
        self._chassis = None  # chassis per rack

        # optional
        self._rack_stripe = False

        # parse kwargs
        for key in kwargs:
            if key == 'concentration':
                assert self._concentration is None, 'duplicate concentration'
                self._concentration = int(kwargs[key])
            elif key == 'widths':
                assert self._widths is None, 'duplicate widths'
                self._widths = utils.str_to_int_list(kwargs[key])
            elif key == 'weights':
                assert self._weights is None, 'duplicate weights'
                self._weights = utils.str_to_int_list(kwargs[key])
            elif key == 'chassis':
                assert self._chassis is None, 'duplicate chassis'
                self._chassis = int(kwargs[key])
            elif key == 'rack_stripe':
                self._rack_stripe = utils.str_to_bool(kwargs[key])
            elif key in super(Hyperx2DStripe, self).using_options():
                pass
            else:
                assert False, 'unknown option key: {}'.format(key)

        # check mandatory were given
        assert (self._concentration is not None and self._widths is not None and
                self._weights is not None and self._chassis is not None), \
                'concentration, widths, weights, and chassis must all be specified'

        # check sizes
        if len(self._widths) != len(self._weights) and len(self._widths) > 0:
            raise ValueError('HyperX widths and weights must be equal length')
        assert len(
            self._widths) <= 2, 'This topology supports up to 2 dimensions'
        for width in self._widths:
            assert width >= 2, 'HyperX widths must be > 1'

        # compute number of routers and nodes
        self._routers = functools.reduce(operator.mul, self._widths)
        self._nodes = self._routers * self._concentration

        # adjust the topology into a 2D topology even if it isn't
        dims = len(self._widths)
        for more_dims in range(dims, 2):
            self._widths.append(1)
            self._weights.append(0)
        assert len(self._widths) == 2

        # determine total number of racks
        self._racks = math.ceil(
            (self._widths[0] * self._widths[1]) / self._chassis)

        # lengths
        self._lenmax = 0
        self._lenmin = 9999999
        self._lensum = 0
        self._cblcnt = 0
Example #25
#!/usr/bin/python2
""" DEVICE CONFIGURATION FILES """

__all__ = ['options_snmp', 'options_config', 'options_database', 'options_threads']

# Import required python libraries
import os, sys
import time
from shutil import copyfile
from subprocess import Popen, PIPE
from stat import ST_MTIME
from utils import eprint, sprint, str_to_bool, ConfigFileError, DiffProcedureError

compare = str_to_bool(os.getenv('CONFIG_COMPARE'))
""" generate diff file for configurations in one folder """
no_duplicates = str_to_bool(os.getenv('CONFIG_NODUP'))
""" delete new configurations without changes """
folder = os.getenv('CONFIG_FOLDER')
""" folder to store device configurations in 'format' macro, fields syslog, l0ip """
file = os.getenv('CONFIG_FILE')
""" device configuration names in 'format' macro, fields syslog, l0ip, n """
diff = os.getenv('CONFIG_DIFF')
""" device configuration different file name in 'format' macro, fields syslog, l0ip """
_diff_cmp = ['/usr/bin/diff', '-qs']
_diff_out = ['/usr/bin/diff', os.getenv('CONFIG_DIFF_OPT')]

def do_save(src, msg, opt):
    l0ip = get_l0ip(msg['SOURCEIP'])
    dst_folder = get_config_folder(msg, l0ip=l0ip)
    if not os.path.exists(dst_folder):
        os.makedirs(dst_folder)
Example #26
def main(*args):

    # load stop words
    stop_words = get_stop_words()

    plot = const.PLOT_DEFAULT
    print_ = const.PRINT_DEFAULT
    max_features = None
    random_state = const.RANDOM_STATE_DEFAULT
    order = -1  # default descending order
    wordcloud_n = None
    wordcloud_ = False
    cos_sim = False
    even_distrib = const.EVEN_DISTRIB_DEFAULT
    plt.rcParams.update({'font.size': const.FONT_SIZE_DEFAULT})
    pre_vec = False
    limit_size = False
    min_df = 1
    max_df = 1.0
    param_compare = False

    # print command line arguments
    for arg in args:
        k = arg.split("=")[0]
        v = arg.split("=")[1]
        if k == 'plot':
            plot = utils.str_to_bool(v)
        elif k == 'print':
            print_ = utils.str_to_bool(v)
        elif k == 'max_features':
            max_features = int(v)
        elif k == 'stop_words':
            if utils.str_to_bool(v) == False:
                stop_words = None
        elif k == 'random_state':
            random_state = int(v)
        elif k == 'order':
            order = int(v)
        elif k == 'wordcloud':
            wordcloud_ = utils.str_to_bool(v)
        elif k == 'wordcloud_n':
            wordcloud_n = int(v)
        elif k == 'cos_sim':
            cos_sim = utils.str_to_bool(v)
        elif k == 'font_size':
            plt.rcParams.update({'font.size': int(v)})
        elif k == 'even_distrib':
            even_distrib = utils.str_to_bool(v)
        elif k == 'pre_vec':
            pre_vec = utils.str_to_bool(v)
        elif k == 'limit_size':
            limit_size = utils.str_to_bool(v)
        elif k == 'min_df':
            min_df = int(v)
        elif k == 'max_df':
            max_df = float(v)
            if max_df > 1:
                max_df = int(max_df)
        elif k == 'param_compare':
            param_compare = utils.str_to_bool(v)
        else:
            print("Unknown param: {}".format(k))

    if print_:
        print()
        print("-- Analysis config --")
        print("even_distrib: {}".format(even_distrib))
        print("stop_words: {}".format(stop_words != None))
        print("max_features: {}".format(max_features))
        print("random_state: {}".format(random_state))
        print("wordcloud: {}".format(wordcloud_))
        print("wordcloud_n: {}".format(wordcloud_n))
        print("order: {}".format(order))
        print("cos_sim: {}".format(cos_sim))
        print("param_compare: {}".format(param_compare))
        print("pre_vec: {}".format(pre_vec))
        print("limit_size: {}".format(limit_size))
        print("min_df: {}".format(min_df))
        print("max_df: {}".format(max_df))
        print("plot: {}".format(plot))
        print("--------------------")
        print()

    gen_spotify_df = pd.read_csv(const.GEN_SPOTIFY)
    clean_spotify_df = pd.read_csv(const.CLEAN_SPOTIFY)
    if even_distrib == False:
        clean_spotify_df = pd.read_csv(const.CLEAN_UNEVEN_SPOTIFY)

    gen_deezer_df = pd.read_csv(const.GEN_DEEZER)
    clean_deezer_df = pd.read_csv(const.CLEAN_DEEZER)
    if even_distrib == False:
        clean_deezer_df = pd.read_csv(const.CLEAN_UNEVEN_DEEZER)

    datasets = [
        (const.SPOTIFY, clean_spotify_df),
        (const.DEEZER, clean_deezer_df),
    ]
    vectorizer = CountVectorizer(
        stop_words=stop_words,
        ngram_range=(1, 1),
        min_df=min_df,
        max_df=max_df,
        max_features=max_features,
        binary=True,
    )

    # word clouds
    if wordcloud_:
        top_n = gen_word_cloud_grid(
            const.SPOTIFY,
            clean_spotify_df,
            vectorizer=vectorizer,
            n=wordcloud_n,
            order=order,
            random_state=random_state,
            print_=print_
        )
        spotify_shared, spotify_unique = get_shared_words(top_n)

        top_n = gen_word_cloud_grid(
            const.DEEZER,
            clean_deezer_df,
            vectorizer=vectorizer,
            n=wordcloud_n,
            order=order,
            random_state=random_state,
            print_=print_
        )
        deezer_shared, deezer_unique = get_shared_words(top_n)

        if print_:
            print()
            print("Spotify: count shared={}".format(
                len(spotify_shared)/len(spotify_unique)))
            print("Deezer: count shared={}".format(
                len(deezer_shared)/len(deezer_unique)))
            print()

    # cosine similarity
    if cos_sim: 
        for name, dataset in datasets:
            if pre_vec:
                dataset = utils.get_vectorized_df(dataset, vectorizer)

            print("{} class data similarity analysis...".format(name))
            for i in dataset.y.unique():
                class_df = utils.get_class_based_data(
                    dataset,
                    i,
                    random_state=random_state,
                    include_other_classes=True,
                    even_distrib=False,
                    limit_size=limit_size,
                    print_=True,
                )
                if pre_vec == False:
                    class_df = utils.get_vectorized_df(class_df, vectorizer)
                pos_df = utils.get_class_based_data(class_df, 1)
                pos_df.pop('y')
                ave_pos = utils.get_average_cos_sim(pos_df.values)
                neg_df = utils.get_class_based_data(class_df, -1.0)
                neg_df.pop('y')
                ave_neg = utils.get_average_cos_sim(neg_df.values)
                ave_between = utils.get_average_cos_sim(
                    pos_df.values, neg_df.values)
                print("class {}".format(i))
                print("data shape: {}".format(class_df.shape))
                print("average positive cosine similarity: {}".format(ave_pos))
                print("average negative cosine similarity: {}".format(ave_neg))
                print("average between cosine similarity: {}".format(ave_between))
                print("(pos - between )+ (neg - between) percentage = {} ".format(
                    (ave_pos - ave_between) / ave_pos + (ave_neg - ave_between)  / ave_neg
                ))
                print()

    if param_compare:
        # min_df vs pos_sim, neg_sim, between_sim
        params_grid = {
            'min_df': [i for i in range(1, 15)],
            'max_df': np.arange(0.1, 1.0, 0.1),
        }

        for name, dataset in datasets:    
            for i in dataset.y.unique():
                df = utils.get_class_based_data(
                    dataset,
                    i,
                    random_state=random_state,
                    include_other_classes=True,
                    even_distrib=False,
                    limit_size=limit_size,
                )
                for p, v in params_grid.items():
                    print("Comparing cosine similarity vs {} for {} Class {} data...".format(p, name, i))
                    vectorizer = CountVectorizer(
                        stop_words=stop_words,
                        ngram_range=(1, 1),
                        min_df=min_df,
                        max_df=max_df,
                        max_features=max_features,
                        binary=True,
                    )
                    pos_sim = []
                    neg_sim = []
                    between_sim = []
                    diff = []
                    for j in range(len(v)):
                        vectorizer.set_params(**{p: v[j]})
                        class_df = utils.get_vectorized_df(df, vectorizer)
                        pos_df = utils.get_class_based_data(class_df, 1)
                        pos_df.pop('y')
                        ave_pos = utils.get_average_cos_sim(pos_df.values)
                        neg_df = utils.get_class_based_data(class_df, -1.0)
                        neg_df.pop('y')
                        ave_neg = utils.get_average_cos_sim(neg_df.values)
                        ave_between = utils.get_average_cos_sim(
                            pos_df.values, neg_df.values)
                        pos_sim.append(ave_pos)
                        neg_sim.append(ave_neg)
                        between_sim.append(ave_between)
                        diff.append((ave_pos - ave_between)/ave_pos + (ave_neg - ave_between)/ave_neg)
                    
                    plt.figure()
                    plt.title("{} Class {}: {} vs cosine similarity".format(name,i, p))
                    pos_sim = np.array(list(zip(v, pos_sim)))
                    neg_sim = np.array(list(zip(v, neg_sim)))
                    between_sim = np.array(list(zip(v, between_sim)))
                    diff = np.array(list(zip(v, diff)))
                    plt.plot(pos_sim[:, 0], pos_sim[:, 1], label='pos sim')
                    plt.plot(neg_sim[:, 0], neg_sim[:, 1], label='neg sim')
                    plt.plot(between_sim[:, 0], between_sim[:, 1], label='between sim')
                    plt.plot(diff[:, 0], diff[:, 1], label='sim difference (%)')
                    plt.xlabel(p)
                    plt.legend()            

    # grid search eval
    if plot:
        plt.draw()
        plt.show()
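The function consumes key=value strings rather than pre-parsed flags, so a hypothetical invocation looks like:

main('print=true', 'wordcloud=true', 'wordcloud_n=50', 'cos_sim=true')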
Example #27
	def get_gpio_status(self):
		# maybe this will be something more substantial...
		return str_to_bool(self.db.get('GPIO_STATUS'))
Example #28
def cli_main(monitor: str, stat: str, download: str, dir: str):
    monitor = utils.str_to_bool(monitor)
    download = utils.str_to_bool(download)
    assert stat in ['ens', 'mean', 'sprd'], 'stat must be ens, mean, or sprd'
    retr = GEFSRetrieve(download_dir=dir, download=download, monitor=monitor)
    retr.run(stat)
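A hypothetical call; CLI layers typically hand flags through as strings, hence the str_to_bool coercion above.

cli_main(monitor="false", stat="mean", download="true", dir="/tmp/gefs")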
Example #29
def load_aliases_from_xml(file):
	xmlfile = parse(file)
	group = xmlfile.getroot()

	new_group = {}

	# Ensure these triggers have a group
	if group.tag != 'group':
		print "load_aliases_from_xml: File %s not contained in a <group>" % file
		return False

	# Gracefully handle group name being omitted
	try:
		new_group['name'] = group.attrib['name']
	except KeyError:
		print "load_aliases_from_xml: Group name not defined in %s" % file
		return False

	# Optionally apply group enabled/disabled
	try:
		new_group['enabled'] = group.attrib['enabled']
		new_group['enabled'] = str_to_bool(new_group['enabled'])
	except KeyError:
		new_group['enabled'] = False

	core.add_alias_group(new_group['name'], new_group['enabled'])

	# Load triggers for group
	for alias in group:
		new_alias = {
			'name': 	None,
			'type': 	None,
			'pattern':	None,
			'enabled':	True,
			'function':	[],
			'send': []
		}

		for node in alias:
			if node.tag == 'name':
				new_alias['name'] = node.text

			if node.tag == 'type':
				new_alias['type'] = node.text

			if node.tag == 'pattern':
				new_alias['pattern'] = node.text

			if node.tag == 'enabled':
				new_alias['enabled'] = str_to_bool(node.text)

			if node.tag == 'function':
				new_alias['function'].append(node.text)

			if node.tag == 'send':
				new_alias['send'].append(node.text)

		# Validate <alias>
		keys = new_alias.keys()

		functions = []

		# There has to be a better way.....
		if 'name' not in keys or 'type' not in keys or 'pattern' not in keys:
			print "load_alias_from_xml: Required alias element missing in %s" % file
			return False

		# Validate alias types
		if new_alias['type'] not in ('exact', 'substring', 'regex', 'startswith', 'endswith'):
			print "load_alias_from_xml: Invalid alias type in %s" % file
			return False


		# Figure out enabled
		try:
			if new_alias['enabled'] is False:
				pass
		except KeyError:
			new_alias['enabled'] = False

		if len(new_alias['send']) > 0:
			for send in new_alias['send']:
				functions.append('send ' + send)

		if len(new_alias['function']) > 0:
			for function in new_alias['function']:
				functions.append(function)


		if new_alias['function'] is not None:
			core.add_alias(
				new_alias['name'],
				new_group['name'],
				new_alias['type'],
				new_alias['pattern'],
				functions,
				new_alias['enabled'],
			)
		else:
			print "load_alias_from_xml: No function defined for generic trigger in %s" % file
			return False

	# Done!
	return new_group['name']
Example #30
import os

from utils import str_to_bool

LR = 0.001
EPOCHS = int(os.getenv("EPOCHS", 5))  # cast: os.getenv returns a str when the variable is set
_BATCHSIZE = 64
R_MEAN = 123.68
G_MEAN = 116.78
B_MEAN = 103.94
BUFFER = 256
DEFAULT_IMAGE_SIZE = 224
NUM_CHANNELS = 3
NUM_CLASSES = 1001
NUM_IMAGES = {"train": 1_281_167, "validation": 50000}
NUM_TRAIN_FILES = 1024
SHUFFLE_BUFFER = 100

DATA_LENGTH = int(
    os.getenv("FAKE_DATA_LENGTH", 1_281_167)
)  # How much fake data to simulate, default to size of imagenet dataset

DATASET_NAME = "ImageNet"

DISTRIBUTED = str_to_bool(os.getenv("DISTRIBUTED", "False"))
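# Hypothetical shell override of these defaults (illustration only):
#   DISTRIBUTED=true EPOCHS=10 FAKE_DATA_LENGTH=50000 python train.py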
Example #31
def main(*args):

    save = False
    data_size = -1
    spotify_thresh = 0
    deezer_thresh = 0
    class_per_quad = 1
    dist_mode = 'furthest_from_mean'

    for arg in args:
        k = arg.split("=")[0]
        v = arg.split("=")[1]
        if k == 'save':
            save = str_to_bool(v)
        elif k == 'data_size':
            data_size = int(v)
        elif k == 'spotify_thresh':
            spotify_thresh = float(v)
        elif k == 'deezer_thresh':
            deezer_thresh = float(v)
        elif k == 'class_per_quad':
            class_per_quad = int(v)
        elif k == 'dist_mode':
            dist_mode = str(v)

    print()
    print("--- Clean config ---")
    print("save: {}".format(save))
    print("data_size: {}".format(data_size))
    print("spotify_thresh: {}".format(spotify_thresh))
    print("deezer_thresh: {}".format(deezer_thresh))
    print("class_per_quad: {}".format(class_per_quad))
    print("dist_mode: {}".format(dist_mode))
    print("--------------------")
    print()

    # SPOTIFY DATASET
    cleaned_df = clean(const.GEN_SPOTIFY)
    df = gen_labels(
        cleaned_df,
        cross_over_val=0.5,
        thresh=spotify_thresh,
        data_size=data_size,
        class_per_quadrant=class_per_quad,
        dist_mode=dist_mode,
    )
    check_dup(df, "Error: Duplicates!!!!!!!!!!!!!!!!!")

    if save:
        # save as csv (don't include indices in .csv)
        if data_size > 0 and dist_mode != 'furthest_from_mean':
            print("Saving result to {}...\n".format(const.CLEAN_SPOTIFY))
            df.to_csv(const.CLEAN_SPOTIFY, index=False)
        else:
            print("Saving result to {}...\n".format(
                const.CLEAN_UNEVEN_SPOTIFY))
            df.to_csv(const.CLEAN_UNEVEN_SPOTIFY, index=False)

    # DEEZER DATASET
    cleaned_df = clean(
        const.GEN_DEEZER,
        # word_count_range=(30, 600),
        # unique_words_range=(10, 300)
    )
    df = gen_labels(
        cleaned_df,
        cross_over_val=0,
        thresh=deezer_thresh,
        data_size=data_size,
        class_per_quadrant=class_per_quad,
        dist_mode=dist_mode,
    )
    check_dup(df, "Error: Duplicates!!!!!!!!!!!!!!!!!")
    if save:
        # save as csv (don't include indices in .csv)
        if data_size > 0 and dist_mode != 'furthest_from_mean':
            print("Saving result to {}...".format(const.CLEAN_DEEZER))
            df.to_csv(const.CLEAN_DEEZER, index=False)
        else:
            print("Saving result to {}...".format(const.CLEAN_UNEVEN_DEEZER))
            df.to_csv(const.CLEAN_UNEVEN_DEEZER, index=False)
Example #32
def load_triggers_from_xml(file):
	xmlfile = parse(file)
	group = xmlfile.getroot()

	new_group = {}

	# Ensure these triggers have a group
	if group.tag != 'group':
		print "load_triggers_from_xml: File %s not contained in a <group>" % file
		return False

	# Gracefully handle group name being omitted
	try:
		new_group['name'] = group.attrib['name']
	except KeyError:
		print "load_triggers_from_xml: Group name not defined in %s" % file
		return False

	# Optionally apply group enabled/disabled
	try:
		new_group['enabled'] = group.attrib['enabled']
		new_group['enabled'] = str_to_bool(new_group['enabled'])
	except KeyError:
		new_group['enabled'] = False

	core.add_trigger_group(new_group['name'], new_group['enabled'])

	# Load triggers for group
	for trigger in group:
		new_trigger = {
			'name': None,
			'type': None,
			'pattern': None,
			'enabled': True,
			'function': [],
			'pfunction': [],
			'pdisable_trigger': [],
			'affliction': [],
			'cure': [],
			'bcure': [],
			'symptom': [],
			'gag': [],
			'enable_group': [],
			'disable_group': [],
			'enable_trigger': [],
			'disable_trigger': [],
			'ondef': [],
			'undef': [],
			'send': [],
			'deflist': None
		}

		for node in trigger:
			if node.tag == 'name':
				new_trigger['name'] = node.text

			elif node.tag == 'type':
				new_trigger['type'] = node.text

			elif node.tag == 'pattern':
				new_trigger['pattern'] = node.text

			elif node.tag == 'enabled':
				new_trigger['enabled'] = str_to_bool(node.text)

			elif node.tag == 'function':
				new_trigger['function'].append(node.text)

			elif node.tag == 'pfunction':
				new_trigger['pfunction'].append(node.text)

			elif node.tag == 'pdisable_trigger':
				new_trigger['pdisable_trigger'].append(node.text)

			elif node.tag == 'affliction':
				new_trigger['affliction'].append(node.text)

			elif node.tag == 'symptom':
				new_trigger['symptom'].append(node.text)

			elif node.tag == 'cure':
				new_trigger['cure'].append(node.text)

			elif node.tag == 'bcure':
				new_trigger['bcure'].append(node.text)

			elif node.tag == 'gag':
				new_trigger['gag'].append(node.text)

			elif node.tag == 'send':
				new_trigger['send'].append(node.text)

			elif node.tag == 'enable_group':
				new_trigger['enable_group'].append(node.text)

			elif node.tag == 'disable_group':
				new_trigger['disable_group'].append(node.text)

			elif node.tag == 'enable_trigger':
				new_trigger['enable_trigger'].append(node.text)

			elif node.tag == 'disable_trigger':
				new_trigger['disable_trigger'].append(node.text)

			elif node.tag == 'undef':
				new_trigger['undef'].append(node.text)

			elif node.tag == 'ondef':
				new_trigger['ondef'].append(node.text)

			elif node.tag == 'deflist':
				new_trigger['deflist'] = node.text

		# Validate <trigger>
		keys = new_trigger.keys()

		# There has to be a better way.....
		if 'name' not in keys or 'type' not in keys or 'pattern' not in keys:
			print "load_triggers_from_xml: Required trigger element missing in %s" % file
			return False

		# Validate trigger types
		if new_trigger['type'] not in ('exact', 'substring', 'regex', 'startswith', 'endswith'):
			print "load_triggers_from_xml: Invalid trigger type in %s" % file
			return False


		# Figure out enabled
		try:
			if new_trigger['enabled'] is False:
				pass
		except KeyError:
			new_trigger['enabled'] = False

		functions = []

		if len(new_trigger['affliction']) > 0:
			for affliction in new_trigger['affliction']:
				functions.append('affliction ' + affliction)

		if len(new_trigger['symptom']) > 0:
			for symptom in new_trigger['symptom']:
				functions.append('symptom ' + symptom)

		if len(new_trigger['cure']) > 0:
			for cure in new_trigger['cure']:
				functions.append('cure ' + cure)

		if len(new_trigger['bcure']) > 0:
			for bcure in new_trigger['bcure']:
				functions.append('bcure ' + bcure)

		if len(new_trigger['gag']) > 0:
			for gag in new_trigger['gag']:
				functions.append('gag ' + gag)

		if len(new_trigger['send']) > 0:
			for send in new_trigger['send']:
				functions.append('send ' + send)

		if len(new_trigger['enable_group']) > 0:
			for group in new_trigger['enable_group']:
				functions.append('enable_group ' + group)

		if len(new_trigger['disable_group']) > 0:
			for group in new_trigger['disable_group']:
				functions.append('disable_group ' + group)

		if len(new_trigger['enable_trigger']) > 0:
			for trigger in new_trigger['enable_trigger']:
				functions.append('enable_trigger ' + trigger)

		if len(new_trigger['disable_trigger']) > 0:
			for trigger in new_trigger['disable_trigger']:
				functions.append('disable_trigger ' + trigger)

		if len(new_trigger['ondef']) > 0:
			for defence in new_trigger['ondef']:
				functions.append('ondef ' + defence)

		if len(new_trigger['undef']) > 0:
			for defence in new_trigger['undef']:
				functions.append('undef ' + defence)

		if new_trigger['deflist'] is not None:
			functions.append('deflist ' + new_trigger['deflist'])

		if len(new_trigger['function']) > 0:
			for function in new_trigger['function']:
				functions.append(function)

		if len(new_trigger['pfunction']) > 0:
			for function in new_trigger['pfunction']:
				functions.append('pfunction ' + function)

		if len(new_trigger['pdisable_trigger']) > 0:
			for trigger in new_trigger['pdisable_trigger']:
				functions.append('pdisable_trigger ' + trigger)

		if len(functions) == 0:
			print "load_triggers_from_xml: No function defined for '%s' in %s" % (new_trigger['name'], file)
			#print "load_triggers_from_xml: No function defined for generic trigger in %s" % file

		core.add_trigger(
			new_trigger['name'],
			new_group['name'],
			new_trigger['type'],
			new_trigger['pattern'],
			functions,
			new_trigger['enabled'],
		)

	# Done!
	return new_group['name']
Example #33
import os.path
import datetime
import logging
import requests
from requests.auth import HTTPBasicAuth
from StringIO import StringIO

from utils import str_to_bool

try:
    from settings import *
except ImportError:
    GDP_BACKUP_HOST = os.environ['GDP_BACKUP_HOST']
    GDP_BACKUP_USER = os.environ['GDP_BACKUP_USER']
    GDP_BACKUP_PASSWORD = os.environ['GDP_BACKUP_PASSWORD']

    GDP_BACKUP_FORMAT = os.environ['GDP_BACKUP_FORMAT']
    GDP_BACKUP_IS_INCREMENTIVE = str_to_bool(os.environ['GDP_BACKUP_IS_INCREMENTIVE'])
    GDP_BACKUP_IS_NEW_FOLDER_PER_RUN = str_to_bool(os.environ['GDP_BACKUP_IS_NEW_FOLDER_PER_RUN'])

    GDP_BACKUP_DIR = os.environ['GDP_BACKUP_DIR']
    GDP_BACKUP_STATUS_FILE = os.environ['GDP_BACKUP_STATUS_FILE']


def get_backup_folder():
    files_dir = GDP_BACKUP_DIR
    if GDP_BACKUP_IS_NEW_FOLDER_PER_RUN:
        files_dir = '%s/%s' % (GDP_BACKUP_DIR, datetime.datetime.now().strftime('%m%d%y%H%M%S'))
        #files_dir = '%s/%s' % (GDP_BACKUP_DIR, datetime.datetime.now().strftime('%y%m%d%H%M%S'))
        if not os.path.exists(files_dir):
            os.makedirs(files_dir)
            logging.debug("Backup folder created: %s" % files_dir)
    return files_dir
Example #34
File: core.py Project: AKJ/sage
def bootstrap():
	global config
	sys.stdout.write("\nAvalon SAGE %s\n" % VERSION)
	sys.stdout.write("(c) 2010-2011 Matt Davis, Paul Shahid, Todd Wilson, (C) 2013 Andrew Johnson\nAll rights reserved, all wrongs revenged.\n\n")

	if debug_mode:
		sys.stdout.write("            >>> DEBUGGING MODE <<<\n\n")


	# Load Config
	config = ConfigParser.ConfigParser()
	if os.path.isfile(config_file) is False:
		sys.exit('Error: Unable to locate your configuration file (%s)' % config_file)
	config.read(config_file)

	# Pull config out of file and into dictionary "options"
	options = {
		# general
		'class': config_get('general', 'class'),
		'name': config_get('general', 'name'),
		'unknown_affliction_ticks': config_getint('general', 'unknown_affliction_ticks'),
		# connection
		'host':	config_get('connection', 'host'),
		'port':	config_getint('connection', 'port'),
		'listen_on': config_getint('connection', 'listen_on'),
		# display
		'newline_on_ga': config_getboolean('display', 'newline_on_ga'),
		#logging
		'logging_enabled': config_getboolean('logging', 'logging_enabled'),
		# healing
		'health_sip_percentage': config_getint('healing', 'health_sip_percentage'),
		'mana_sip_percentage': config_getint('healing', 'mana_sip_percentage'),
		'eat_lestagii': config_getboolean('healing', 'eat_lestagii'),
		'basic_lestagii_health_percentage': config_getint('healing', 'basic_lestagii_health_percentage'),
		'combat_lestagii_health_percentage': config_getint('healing', 'combat_lestagii_health_percentage'),
		'basic_lestagii_mana_percentage': config_getint('healing', 'basic_lestagii_mana_percentage'),
		'combat_lestagii_mana_percentage': config_getint('healing', 'combat_lestagii_mana_percentage'),
		'herb_balance_delay': config_getfloat('healing', 'herb_balance_delay'),
		# pipes
		'elm': config_get_optional('pipes', 'elm'),
		'artyelm': config_get_optional('pipes', 'artyelm'),
		'malloran': config_get_optional('pipes', 'malloran'),
		'artymalloran': config_get_optional('pipes', 'artymalloran'),
		'valerian': config_get_optional('pipes', 'valerian'),
		'artyvalerian': config_get_optional('pipes', 'artyvalerian'),
		'min_elm': config_getint('pipes', 'min_elm'),
		'min_valerian': config_getint('pipes', 'min_valerian'),
		'min_malloran': config_getint('pipes', 'min_malloran'),
		# antiillusion
		'focus_on_impatience_mana_threshold': config_getint('antiillusion', 'focus_on_impatience_mana_threshold'),
	}

	config_modules = config.items('modules')
	config_herbsout = config.items('herbsout')
	default_sections = ['connection', 'general', 'display', 'logging', 'healing', 'pipes', 'herbsout', 'antiillusion', 'modules']
	classdefssections = [x for x in config.sections() if x not in default_sections]
	classdeflist = [config.items(section) for section in classdefssections]
	classdefs = []
	for x in classdeflist:
		for y in x:
			classdefs.append(y)

	del default_sections, classdefssections, classdeflist
	config = options
	options['herbsout'] = {}
	for herb in config_herbsout:
		options['herbsout'][herb[0]] = int(herb[1])

	player.name = config['name']
	player.lestagii_health_percentage = config['basic_lestagii_health_percentage']
	player.lestagii_mana_percentage = config['combat_lestagii_mana_percentage']
	player.combat_class = config['class']
	if config['artyelm'] == None:
		player.elm_pipe = config['elm']
		player.arty_elm = False
	else:
		player.elm_pipe = config['artyelm']
		player.arty_elm = True

	if config['artymalloran'] == None:
		player.malloran_pipe = config['malloran']
		player.arty_malloran = False
	else:
		player.malloran_pipe = config['artymalloran']
		player.arty_malloran = True

	if config['artyvalerian'] == None:
		player.valerian_pipe = config['valerian']
		player.arty_valerian = False
	else:
		player.valerian_pipe = config['artyvalerian']
		player.arty_valerian = True

	_reset_deflist()

	for option in classdefs:
		if option[0] == 'simultaneity':
			player.simultaneity = utils.str_to_bool(option[1])
			continue
		elif option[0] == 'purity_necklace':
			continue # need to actually make that...

		flags = option[1]

		try:
			player.basic[option[0]] = utils.str_to_bool(flags[0])
			player.keepup_basic[option[0]] = utils.str_to_bool(flags[1])
			player.combat[option[0]] = utils.str_to_bool(flags[2])
			player.keepup_combat[option[0]] = utils.str_to_bool(flags[3])
			player.starburst[option[0]] = utils.str_to_bool(flags[4])
			player.keepup_starburst[option[0]] = utils.str_to_bool(flags[5])
		except:
			sys.exit('Invalid configuration for "' + option[0] + '".')

	path = sys.path[0]

	sys.stdout.write("> Loading configuration (%s)\n" % config_file)
	sys.stdout.write("> Loading Triggers\n")
	loadXML.load_xml_triggers(path + '/triggers/')

	sys.stdout.write("> Registering Trigger Dispatch Functions\n")
	Triggers.trigger_dispatch = loadDispatch.load_dir(path + '/inc/trigger_dispatch/')

	sys.stdout.write("> Loading Aliases\n")
	loadXML.load_xml_aliases(path + '/aliases/')

	sys.stdout.write("> Registering Alias Dispatch Functions\n")
	Aliases.alias_dispatch = loadDispatch.load_dir(path + '/inc/alias_dispatch/')

	""" Load modules from config and bootstrap them """
	sys.stdout.write("> Loading Modules\n")
	for cm in config_modules:
		if cm[1] == 'enabled':
			module = cm[0]
			load_module(module, True)

	"""
	Check all functions called by triggers/aliases and make sure
	they have dispatch functions available to them.
	"""
	error = False
	for group in triggers:
		for trigger in triggers[group]:
			functions = triggers[group][trigger].function
			for function in functions:
				if " " in function:
					function = function.split(" ")
					if function[0] == 'pfunction':
						function = function[1]
					else:
						function = function[0]

				if function not in Triggers.trigger_dispatch:
					sys.stdout.write("Error: trigger '%s::%s' calls unavailable trigger dispatch function '%s'\n" % (group, trigger, function))
					error = True
	if error:
		sys.exit()

	for group in aliases:
		for alias in aliases[group]:
			functions = aliases[group][alias].function
			for function in functions:
				if " " in function:
					function = function.split(" ")[0]

				if function not in Aliases.alias_dispatch:
					sys.stdout.write("Error: alias '%s::%s' calls unavailable alias dispatch function '%s'\n" % (group, alias, function))
					error = True

	if error:
		sys.exit()

	"""
	Ensure we don't have duplicates in triggers and aliases.
	This is the quick and dirty way...could massively optimize this.
	"""
	check_dict = {}
	for group in trigger_groups:
		check_dict[group] = check_dict.get(group, 0) + 1

	for item in check_dict:
		if check_dict[item] > 1:
			error = True
			sys.stdout.write("Error: duplicate trigger group: " + item + "\n")

	check_dict = {}
	for group in alias_groups:
		check_dict[group] = check_dict.get(group, 0) + 1

	for item in check_dict:
		if check_dict[item] > 1:
			error = True
			sys.stdout.write("Error: duplicate alias group: " + item + "\n")

	trigger_list = []
	for group in triggers:
		for trigger in triggers[group]:
			trigger_list.append(trigger)

	check_dict = {}
	for trigger in trigger_list:
		check_dict[trigger] = check_dict.get(trigger, 0) + 1

	for item in check_dict:
		if check_dict[item] > 1:
			error = True
			sys.stdout.write("Error: duplicate trigger: " + item + "\n")

	alias_list = []
	for group in aliases:
		for alias in aliases[group]:
			alias_list.append(alias)

	check_dict = {}
	for alias in alias_list:
		check_dict[alias] = check_dict.get(alias, 0) + 1

	for item in check_dict:
		if check_dict[item] > 1:
			error = True
			sys.stdout.write("Error: duplicate alias: " + item + "\n")

	if error:
		sys.exit()

	if player.combat_class == 'monk':
		enable_trigger_group('monk_balances')

	sys.stdout.write("> Initializing Twisted\n")

	import telnetProxy

	factory = telnetProxy.Factory()
	factory.protocol = telnetProxy.TelnetServer
	reactor.listenTCP(config['listen_on'], factory)

	if console_enabled:
		reactor.callWhenRunning(interact, stopReactor=True)

	reactor.addSystemEventTrigger("before", "shutdown", shutdown)
	reactor.run()
Example #35
    def once_complex(self, dc, one_dc):
        set_title = self.start_btn.update_query
        update_status = self.update_status

        need_number = one_dc['need_number']
        num_file = one_dc["number_file"]
        num_size = one_dc['number_size']
        num_join_str = one_dc['number_join_str']
        num_join_short_str = one_dc['number_join_short_str']
        if not num_join_short_str:
            num_join_short_str = ''
        else:
            num_join_short_str = " " + num_join_short_str

        num_second = 0
        is_iqy = (num_join_str == '爱奇艺备案号')  # '爱奇艺备案号' is the iQIYI registration-number label
        raw_mp4 = one_dc['rawMP4']
        i = one_dc['index']
        number_second = int(dc["number_second"])
        total = one_dc['total']

        out_dir = one_dc['output_dir']
        temp_dir = one_dc['temp_dir']
        pt_second = one_dc['pt_second']
        pw_second = one_dc['pw_second']
        pt_out_file = one_dc['pt_out_file']
        pw_out_file = one_dc['pw_out_file']
        frame_size = one_dc['frame_size']
        water_size = one_dc['water_size']

        rad_var = dc['fps']
        if rad_var == 2:
            fps = '24'
        elif rad_var == 3:
            fps = '25'
        elif rad_var == 4:
            fps = '30'
        else:
            fps = '0'
        target_fps = fps
        radio_select_var = dc["bit"]

        pt_file = dc["pt_file"]
        pw_file = dc["pw_file"]
        frame_file = dc["frame_file"]
        watermark_file = dc["watermark_file"]

        pt_select = dc['pt_select']
        pw_select = dc['pw_select']
        need_frame = dc["frame_select"]
        need_watermark = dc["watermark_select"]

        double_fix_select = utils.str_to_bool(dc["select_double_fix"])
        select_30m = utils.str_to_bool(dc["select_30m"])
        fast_mode_select = False
        # fast_mode_select = dc['fast_mode_select']

        # skip_content_mp4 = False
        count = i + 1

        set_title("")
        format_str = "(%d/%d)" % (count, total) + ' %s'

        arr = utils.get_file_names(raw_mp4)
        f_name = arr[1]
        f_type = arr[2]
        f_full_name = f_name + f_type

        out_file_type = ".mpg" if select_30m else ".mp4"
        temp_video = temp_dir + "-" + f_name + out_file_type
        final_video = out_dir + f_name + out_file_type

        if need_number and num_join_str:
            temp_path = Path(out_dir) / num_join_str
            temp_path = str(temp_path) + os.sep
            utils.make_dir(temp_path)
            final_video = temp_path + f_name + out_file_type

        vb_str = ""
        need_same_bit_rate = False
        # 1) Transcode the main feature video
        set_title(format_str % f_full_name)
        update_status(i, '10%' + num_join_short_str)

        # Match resolution and fps
        tdc = ff.get_video_info(raw_mp4, False)
        v_size = tdc["v_size"] if tdc["v_size"] else "1920x1080"
        tdc["v_size"] = v_size
        fps = tdc["fps"] if tdc["fps"] else "24"
        tdc["fps"] = fps if target_fps == '0' else target_fps
        duration = tdc['duration'] if tdc["duration"] else '0'
        duration = float(duration)

        if is_iqy:
            vb_str = "8M"
        else:
            # Bit-rate handling
            if radio_select_var == 1:  # keep the source bit rate
                need_same_bit_rate = True
                # tdc["crf"] = 1
                vb_str = ''

            elif radio_select_var == 2:  # automatic (CRF)
                tdc["crf"] = 18
                vb_str = ''

            elif radio_select_var == 3:
                vb_str = "4M"

            elif radio_select_var == 4:
                vb_str = "6M"

            elif radio_select_var == 5:
                vb_str = "8M"

            elif radio_select_var == 6:
                vb_str = "10M"

            elif radio_select_var == 7:
                vb_str = "30M"

        obj = ff.create_obj()
        obj.input_file = raw_mp4
        obj.output_file = temp_video
        obj.need_same_bit_rate = need_same_bit_rate
        obj.need_30m = select_30m
        # obj.set_video_info(tdc)
        # obj.fps = fps
        # obj.size = v_size
        obj.set_video_info(tdc, vb_str)

        if need_number:
            if number_second == -1:
                num_second = duration + pt_second + pw_second
            else:
                num_second = number_second

        if double_fix_select and duration:
            obj.time_start = 0
            obj.time_to = duration
            duration_string = ff.millisecond_to_str(int(duration * 1000))
            set_title(format_str % ("*[双倍时长修正]该视频时长:" + duration_string))

        png_list = []
        msg_str = '正在转换 正片('
        if need_frame:
            png_list.append(["加幕布", frame_file, frame_size, 0])
        if need_watermark:
            png_list.append([" 加水印", watermark_file, water_size, 0])
        if need_number:
            t = num_second - pt_second
            png_list.append([" 加备案号", num_file, num_size, t])
        if len(png_list):
            sizes = []
            times = []
            npngs = []
            for p in png_list:
                msg_str += p[0]
                npngs.append(p[1])
                sizes.append(p[2])
                times.append(p[3])
            png_list = npngs
            obj.set_overlay(png_list, sizes, times)

            msg_str += ')……'
            msg_str = msg_str.replace('()', '')
            set_title(format_str % msg_str)

        # Cases where this transcode pass can be skipped:
        # when no compositing option is selected, the feature only needs a
        # single transcode, which is handled further below
        if not need_frame and not need_watermark and not need_number and not double_fix_select:
            skip_content_mp4 = True
        else:
            skip_content_mp4 = False
            update_status(i, '20%' + num_join_short_str)
            obj.execute()

        # 2) An intro or outro needs to be merged in
        if pt_select or pw_select:
            # Generate concat.txt and transcode the intro/outro
            subs = []
            # 1
            if pt_select:
                nobj = ff.create_obj()
                nobj.input_file = pt_file
                nobj.output_file = pt_out_file
                nobj.need_30m = select_30m
                nobj.need_same_bit_rate = need_same_bit_rate
                # nobj.fps = fps
                # nobj.size = v_size
                nobj.set_video_info(tdc, vb_str)
                # The record number needs to be overlaid
                msg_str = "正在转换 片头"
                if need_number and num_second:
                    msg_str += '(加备案号)'
                    if pt_second < num_second:
                        nobj.set_overlay([num_file], [num_size])
                    else:
                        nobj.set_overlay([num_file], [num_size], [pt_second])

                msg_str += '……'
                set_title(format_str % msg_str)
                update_status(i, '40%' + num_join_short_str)
                nobj.execute()
                subs.append(pt_out_file)
            # 2
            if skip_content_mp4:
                if fast_mode_select and ff.compare_video(raw_mp4, pt_out_file):
                    subs.append(raw_mp4)  # keep the feature in the final concat, but never delete the source
                    msg_str = "没有水印等,不转换正片,直接进行合并"
                    set_title(format_str % msg_str)
                else:
                    # Parameters don't match the intro's; do one transcode pass
                    obj.set_video_info(tdc, vb_str)  # this restores most of the earlier parameters
                    msg_str = "正在转换 正片"
                    msg_str += '……'
                    set_title(format_str % msg_str)
                    update_status(i, '50%' + num_join_short_str)
                    obj.execute()
                    subs.append(temp_video)
            else:
                subs.append(temp_video)

            # 3
            if pw_select:
                nobj = ff.create_obj()
                nobj.input_file = pw_file
                nobj.output_file = pw_out_file
                nobj.need_same_bit_rate = need_same_bit_rate
                nobj.need_30m = select_30m
                # nobj.fps = fps
                # nobj.size = v_size
                nobj.set_video_info(tdc, vb_str)

                # The record number needs to be overlaid
                msg_str = "正在转换 片尾"
                t = pt_second + duration
                if need_number and t < num_second:
                    msg_str += '(加备案号)'
                    new_t = num_second - t
                    nobj.set_overlay([num_file], [num_size], [new_t])
                msg_str += "……"
                set_title(format_str % msg_str)
                update_status(i, '60%' + num_join_short_str)
                nobj.execute()
                subs.append(pw_out_file)

            # Concatenate the videos
            set_title(format_str % "拼接中……")
            update_status(i, '90%' + num_join_short_str)
            sub_txt = temp_dir + "concat_" + f_name + ".txt"
            ff.concat(subs, final_video, sub_txt)
            # Remove concat.txt and the temp mp4
            utils.remove_file(sub_txt)
            if not skip_content_mp4:
                utils.remove_file(temp_video)
        else:
            # No compositing options: the feature only needs a single transcode
            if skip_content_mp4:
                obj.execute()
                utils.move_file(temp_video, final_video)
            else:
                utils.move_file(temp_video, final_video)
        self.final_video = final_video
        update_status(i, 'OK')
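The fps radio-button decoding at the top of once_complex is a fixed index-to-value mapping (the same chain reappears in the next example), so a dict lookup can express it more compactly. A sketch with the same semantics:

# Radio-button index -> target fps; '0' means "keep the source fps".
FPS_BY_RADIO = {2: '24', 3: '25', 4: '30'}
fps = FPS_BY_RADIO.get(rad_var, '0')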
Beispiel #36
0
    def process(self, dc, _):
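        """Batch entry point: resolve the shared settings once, build a
        per-file working dict, and run once_complex for every input file
        (once per enabled record-number variant)."""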
        set_title = self.start_btn.update_query

        lists = dc["input_files"]
        output_dir = dc["output_dir"] + os.sep
        output_dir = str(Path(output_dir)) + os.sep
        temp_dir = output_dir + 'tempDir' + os.sep
        utils.make_dir(temp_dir)
        utils.hide_file(temp_dir)

        pt_file = dc["pt_file"]
        pw_file = dc["pw_file"]
        frame_file = dc["frame_file"]
        watermark_file = dc["watermark_file"]
        number_file = dc["number_file"]
        number_file_2 = dc["number_file_2"]
        number_file_3 = dc["number_file_3"]

        pt_select = dc['pt_select']
        pw_select = dc['pw_select']
        need_frame = dc["frame_select"]
        need_watermark = dc["watermark_select"]
        need_number = dc['number_select']
        need_number_2 = dc['number_select_2']
        need_number_3 = dc['number_select_3']

        # double_fix_select = dc["double_fix_select"]
        # fast_mode_select = False
        # fast_mode_select = dc['fast_mode_select']
        # Decode the fps radio-button selection; '0' means keep the source fps
        rad_var = dc['fps']
        if rad_var == 2:
            fps = '24'
        elif rad_var == 3:
            fps = '25'
        elif rad_var == 4:
            fps = '30'
        else:
            fps = '0'

        # Special handling for the 30M (.mpg) output scheme
        select_30m = utils.str_to_bool(dc["select_30m"])
        if select_30m:
            pt_out_file = temp_dir + "--pt.mpg"
            pw_out_file = temp_dir + "--pw.mpg"
        else:
            pt_out_file = temp_dir + "--pt.mp4"
            pw_out_file = temp_dir + "--pw.mp4"

        pf = ''
        if pt_select:
            pf = "加片头"
        if pw_select:
            pf += "加片尾"
        if need_frame:
            pf += ",加幕布"
        if need_watermark:
            pf += ",加水印"
        if need_number:
            pf += ",加备案号"
        pf = pf.strip(', ')
        self.titlePrefix = pf

        frame_size = '0x0'
        water_size = '0x0'
        number_size = '0x0'
        number_size_2 = '0x0'
        number_size_3 = '0x0'
        if need_frame and frame_file:
            frame_size = utils.get_image_size2(frame_file)

        if need_watermark and watermark_file:
            water_size = utils.get_image_size2(watermark_file)

        if need_number and number_file:
            number_size = utils.get_image_size2(number_file)
        if need_number_2 and number_file_2:
            number_size_2 = utils.get_image_size2(number_file_2)
        if need_number_3 and number_file_3:
            number_size_3 = utils.get_image_size2(number_file_3)

        # Intro duration
        pt_second = 0
        if pt_select and pt_file:
            print(pt_file)
            tdc = ff.get_video_info(pt_file, False)  # probe size and fps
            pt_second = tdc['duration'] if tdc["duration"] else '0'
            pt_second = float(pt_second)

        # Outro duration
        pw_second = 0
        if pw_select and pw_file:
            tdc = ff.get_video_info(pw_file, False)  # probe size and fps
            pw_second = tdc['duration'] if tdc["duration"] else '0'
            pw_second = float(pw_second)

        # Per-file working dict, pre-filled with defaults
        one_dc = {
            'index': -1, 'total': 0, 'rawMP4': '', 'output_dir': '',
            'temp_dir': '', 'pt_second': 0, 'pw_second': 0,
            'pt_out_file': '', 'pw_out_file': '', 'frame_size': '',
            'water_size': '', 'need_number': False, 'number_file': '',
            'number_size': '', 'number_join_str': '',
            'number_join_short_str': '', 'fps': 24,
        }
        one_dc['total'] = len(lists)
        one_dc['output_dir'] = output_dir
        one_dc['temp_dir'] = temp_dir
        one_dc['pt_second'] = pt_second
        one_dc['pw_second'] = pw_second
        one_dc['pt_out_file'] = pt_out_file
        one_dc['pw_out_file'] = pw_out_file
        one_dc['fps'] = fps
        if need_frame and frame_file:
            one_dc['frame_size'] = frame_size
        if need_watermark and watermark_file:
            one_dc['water_size'] = water_size

        for i in range(len(lists)):
            one_dc['index'] = i
            one_dc['rawMP4'] = lists[i]

            if need_number:
                one_dc['number_join_str'] = ''
                one_dc['number_join_short_str'] = ''
                one_dc['need_number'] = need_number
                one_dc['number_file'] = number_file
                one_dc['number_size'] = number_size
                self.once_complex(dc, one_dc)

            if need_number_2:
                one_dc['number_join_str'] = '爱奇艺备案号'
                one_dc['number_join_short_str'] = '爱奇艺'
                one_dc['need_number'] = need_number_2
                one_dc['number_file'] = number_file_2
                one_dc['number_size'] = number_size_2
                self.once_complex(dc, one_dc)

            if need_number_3:
                one_dc['number_join_str'] = '腾讯备案号'
                one_dc['number_join_short_str'] = '腾讯'
                one_dc['need_number'] = need_number_3
                one_dc['number_file'] = number_file_3
                one_dc['number_size'] = number_size_3
                self.once_complex(dc, one_dc)

            # No record-number task: run only once
            if not need_number and \
                    not need_number_2 and \
                    not need_number_3:
                one_dc['need_number'] = False
                # one_dc['number_file'] = number_file
                # one_dc['number_size'] = number_size
                # one_dc['number_join_str'] = ''
                # one_dc['number_join_short_str'] = ''
                self.once_complex(dc, one_dc)

        # Delete the temporary files
        utils.remove_file(pt_out_file)
        utils.remove_file(pw_out_file)

        print("完成!\n输出目录:" + output_dir)
        set_title("操作结束!")
        set_title("")

        # Automatically open the result
        if self.final_video and os.path.exists(self.final_video):
            utils.open_file(self.final_video, True)
        else:
            utils.open_dir(output_dir)

        self.t1 = ""
        self.lock_btn(False)

        # Check for, and perform, a scheduled shutdown
        self.cb_shutdown.shutdown()
Beispiel #37
0
def train(config, model_dir):
    """Given a Configuration object of training settings and hyperparameters,
    launch a Trainer instance that trains a neural network model.

    Arguments:
        config {Configuration} -- Configuration object of settings, from a JSON file.
        model_dir {str} -- Path to the target directory for logs and results.

    Returns:
        loss {float} -- The final validation or training loss, depending on the Trainer object.
    """

    # Record time
    now = datetime.now()

    # Set seed (config flags are stored as strings, so parse explicitly)
    if utils.str_to_bool(config.cuda):
        torch.cuda.manual_seed(2222)
    else:
        torch.manual_seed(2222)

    params = config.params
    pretrain = config.pretrain
    data_path = config.data_path
    source = config.source
    mode = config.mode
    trainer_name = config.trainer_name
    model_name = config.model_name
    optimizer_name = config.optimizer_name

    n_epochs = params.n_epochs
    lr = params.lr
    momentum = params.momentum
    batch_size = params.batch_size

    use_transform = utils.str_to_bool(config.transform)
    use_val = utils.str_to_bool(config.validation)

    # Define a loss function. reduction='none' is elementwise loss, later summed manually
    criterion = nn.MSELoss(reduction='none')

    ############### INITIALISE MODEL AND OPTIMIZER ######################
    # Define a model and optimizer pair
    model, optimizer = utils.get_model_and_optimizer(model_name,
                                                     optimizer_name, pretrain,
                                                     params)

    ############## GET DATALOADERS ########################
    # Get dataset of recovery curves
    logging.info("Loading the datasets...")
    dataset = utils.get_dataset(source, data_path, model_dir, mode,
                                use_transform, params)
    logging.info("- Loading complete.")

    # Initialize a Regressor training object
    logging.info("Initializing trainer object...")
    trainer = utils.get_trainer(trainer_name, model, config, criterion,
                                optimizer, dataset, model_dir)
    logging.info("- Initialization complete.")

    ################ TRAIN THE MODEL ######################
    logging.info("Starting training for {} epoch(s)...".format(n_epochs))
    trainer.train()
    logging.info("- Training complete.")

    torch.save(trainer.model,
               os.path.join(model_dir,
                            now.strftime("%Y%m%d-%H%M") + ".pt"))

    return trainer.loss
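A hypothetical call site for train, assuming the settings JSON is first wrapped in a Configuration object (the loader helper and paths below are illustrative, not part of the snippet above):

# Illustrative usage -- utils.get_configuration is an assumed helper.
config = utils.get_configuration("experiments/base/params.json")
loss = train(config, model_dir="experiments/base")
print("Final loss:", loss)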
Beispiel #38
0
print('z_noise', z_noise)
rlr = float(rlr)
print('rlr', rlr)
dglr = float(dglr)
print('dglr', dglr)
sslr = float(sslr)
print('sslr', sslr)
drop = float(drop)
print('drop', drop)
cfy = float(cfy)
print('cfy', cfy)
cfz = float(cfz)
print('cfz', cfz)
print('dataset', dataset)
decrease = xx.str_to_bool(decrease)
print('decrease', decrease)
n_train = int(n_train)
print('n_train', n_train)
n_test = int(n_test)
print('n_test', n_test)
epochs = int(epochs)
print('epochs', epochs)
ss_labels = int(ss_labels)
print('ss_labels', ss_labels)
seed = int(seed)
print('seed', seed)
rep = int(rep)
print('rep', rep)

name = '{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.\
Beispiel #39
0
import os
import logging
import json
from utils import str_to_bool, ts_to_date

import requests

from flask import Flask, request, Response, abort

APP = Flask(__name__)

LOG_LEVEL = os.environ.get('LOG_LEVEL', "INFO")

API_PATH = os.environ.get('API_BASE_PATH')

# Environment variables are strings, so boolean flags must be parsed explicitly
CONVERT_TS_TO_DATE_STR = str_to_bool(os.environ.get('CONVERT_TS_TO_DATE_STR', 'true'))

if not API_PATH:
    logging.error("API_BASE_PATH environment variable is required")
    exit(1)

# At least the Ocp-Apim-Subscription-Key header (with the API access key) must be provided
HEADERS = json.loads(os.environ.get("HEADERS", '{}'))

if not HEADERS.get('Ocp-Apim-Subscription-Key'):
    logging.error("Subscription key not found")
    exit(1)

PORT = int(os.environ.get('PORT', 5001))
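For reference, a minimal sketch of what a str_to_bool helper like the one used throughout these examples might look like (the actual utils.str_to_bool may differ):

def str_to_bool(value):
    # Accept real booleans as-is; otherwise compare the lowered, stripped
    # string against common truthy spellings.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("true", "1", "yes", "on")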