Example #1
def getconfig(configfile=None, debug=False, silent=False):
    """
    read config file

    Note: the Python 2 ConfigParser module was renamed to configparser in
    Python 3, so it is better to use "import configparser" in Python 2 as
    well, for future proofing.

    see also getconfig.cfg

    TODO: catch exceptions

    Support for lists:

    see:

    https://stackoverflow.com/questions/335695/lists-in-configparser

    https://github.com/cacois/python-configparser-examples

    Looks in cwd, home, and home/.config.

    home/.config is not implemented yet.


    """
    import os
    import configparser

    # read the configuration file
    # config = configparser.RawConfigParser()
    config = configparser.SafeConfigParser()

    if configfile is None:
        configfile_default = os.path.splitext(__file__)[0] + '.cfg'
        if debug:
            print('__file__:', __file__)
            print('configfile_default:', configfile_default)
        configfile = configfile_default

    if debug:
        print('Open configfile:', configfile)

    try:
        if not silent:
            print('Reading config file', configfile)

        # config.read() does not raise IOError for a missing file; it
        # returns the list of files it could actually read.
        if not config.read(configfile):
            print('config file', configfile, 'does not exist')
            configfile = os.path.join(os.environ['HOME'], configfile)
            print('trying', configfile)
            config.read(configfile)

    except Exception as e:
        print('Problem reading config file: ', configfile)
        print(e)

    if debug:
        print('configfile:', configfile)
        print('sections:', config.sections())
        for section_name in config.sections():
            print('Section:', section_name)
            print('Options:', config.options(section_name))
            for name, value in config.items(section_name):
                print('  %s = %s' % (name, value))
        print()

        for section_name in config.sections():
            print()
            print('Section:', section_name)
            for name, value in config.items(section_name):
                print('  %s = %s' % (name, value))
                print(section_name, ':', name, config.get(section_name, name))
        print()

    return config
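The docstring above links to two discussions of list support. configparser has no native list type, so a common workaround from those links is to store a comma-separated string and split it on read; a minimal sketch (section and option names are hypothetical):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[paths]
# hypothetical option holding a comma-separated list
dirs = /tmp, /var/tmp, /home
""")
dirs = [d.strip() for d in config.get('paths', 'dirs').split(',')]
print(dirs)  # ['/tmp', '/var/tmp', '/home']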
Example #2
def get_args_parser():
    config_dir = os.path.abspath(
        os.path.expanduser(
            os.environ.get("XDG_CONFIG_HOME") or os.environ.get("APPDATA")
            or os.path.join(os.environ["HOME"], ".config")))

    initial_parser = argparse.ArgumentParser(description="dorkbot",
                                             add_help=False)
    initial_parser.add_argument("-c", "--config", \
        default=os.path.join(config_dir, "dorkbot", "dorkbot.ini"), \
        help="Configuration file")
    initial_parser.add_argument("-r", "--directory", \
        default=os.getcwd(), \
        help="Dorkbot directory (default location of db, tools, reports)")
    initial_args, other_args = initial_parser.parse_known_args()

    defaults = {
        "database": os.path.join(initial_args.directory, "dorkbot.db"),
    }

    if os.path.isfile(initial_args.config):
        config = configparser.SafeConfigParser()
        config.read(initial_args.config)
        options = config.items("dorkbot")
        defaults.update(dict(options))

    parser = argparse.ArgumentParser(parents=[initial_parser])
    parser.set_defaults(**defaults)
    parser.add_argument("--log", \
        help="Path to log file")
    parser.add_argument("-V", "--version", action="version", \
        version="%(prog)s " + __version__, help="Print version")

    database = parser.add_argument_group('database')
    database.add_argument("-d", "--database", \
        help="Database file/uri")
    database.add_argument("-u", "--prune", action="store_true", \
        help="Delete unscannable targets (blacklist / fingerprinting)")

    targets = parser.add_argument_group('targets')
    targets.add_argument("-l", "--list-targets", action="store_true", \
        help="List targets in database")
    targets.add_argument("--add-target", metavar="TARGET", \
        help="Add a url to the target database")
    targets.add_argument("--delete-target", metavar="TARGET", \
        help="Delete a url from the target database")
    targets.add_argument("--flush-targets", action="store_true", \
        help="Delete all targets")

    indexing = parser.add_argument_group('indexing')
    indexing.add_argument("-i", "--indexer", \
        help="Indexer module to use")
    indexing.add_argument("-o", "--indexer-option", action="append", \
        help="Pass an option to the indexer (can be used multiple times)")

    scanning = parser.add_argument_group('scanning')
    scanning.add_argument("-s", "--scanner", \
        help="Scanner module to use")
    scanning.add_argument("-p", "--scanner-option", action="append", \
        help="Pass an option to the scanner (can be used multiple times)")

    fingerprints = parser.add_argument_group('fingerprints')
    fingerprints.add_argument("-f", "--flush-fingerprints", action="store_true", \
        help="Delete all fingerprints of previously-scanned items")

    blacklist = parser.add_argument_group('blacklist')
    blacklist.add_argument("-b", "--blacklist", \
        help="Blacklist file/uri")
    blacklist.add_argument("--list-blacklist", action="store_true", \
        help="List blacklist entries")
    blacklist.add_argument("--add-blacklist-item", metavar="ITEM", \
        help="Add an ip/host/regex pattern to the blacklist")
    blacklist.add_argument("--delete-blacklist-item", metavar="ITEM", \
        help="Delete an item from the blacklist")
    blacklist.add_argument("--flush-blacklist", action="store_true", \
        help="Delete all blacklist items")

    args = parser.parse_args(other_args)
    args.directory = initial_args.directory
    return args, parser
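For reference, a hypothetical dorkbot.ini that this parser would pick up (option names mirror the argument names above) can be exercised with the same machinery; every option found becomes a default for the matching argparse argument via set_defaults:

import configparser

# hypothetical dorkbot.ini contents
config = configparser.ConfigParser()
config.read_string("""
[dorkbot]
database = /tmp/dorkbot.db
indexer = google
""")
print(dict(config.items('dorkbot')))  # {'database': '/tmp/dorkbot.db', 'indexer': 'google'}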
Example #3
    def readIniFile(self):
        conf = configparser.SafeConfigParser()
        conf.read('properties.ini', encoding='utf-8')
        for section in conf.sections():
            self.getSectionLinkedValue(conf, section)
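Note that SafeConfigParser is only a deprecated alias in Python 3: it was renamed to ConfigParser in 3.2, and the alias was removed entirely in 3.12. A forward-compatible sketch of the same reader, written standalone rather than as a method:

import configparser

conf = configparser.ConfigParser()  # SafeConfigParser alias removed in Python 3.12
conf.read('properties.ini', encoding='utf-8')
for section in conf.sections():
    print(section, dict(conf.items(section)))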
Example #4
    def cleaned_config(self):
        shutil.copy(self.configpath, self.cleanpath)
        tmpconfig = configparser.SafeConfigParser()
        tmpconfig.readfp(codecs.open(self.cleanpath, 'r', 'utf8'))

        for v in self.cleaned_list:
            try:
                tmpkey = tmpconfig.get(v[0], v[1])
                if all([tmpkey is not None, tmpkey != 'None']):
                    if tmpkey[:5] == '^~$z$':
                        tk = encrypted.Encryptor(tmpkey)
                        tk_stat = tk.decrypt_it()
                        if tk_stat['status'] is True:
                            tmpkey = tk_stat['password']
                    if tmpkey not in self.keylist:
                        self.keylist.append(tmpkey)
                    tmpconfig.set(v[0], v[1], 'xXX[REMOVED]XXx')
            except (configparser.NoSectionError,
                    configparser.NoOptionError) as e:
                pass

        for h in self.hostname_list:
            try:
                hkey = tmpconfig.get(h[0], h[1])
                if all([hkey is not None, hkey != 'None']):
                    if hkey[:5] == '^~$z$':
                        hk = encrypted.Encryptor(hkey)
                        hk_stat = hk.decrypt_it()  # fixed: was tk.decrypt_it()/tk_stat
                        if hk_stat['status'] is True and 'username' not in h[1]:
                            hkey = hk_stat['password']
                    if hkey not in self.keylist:
                        self.keylist.append(hkey)
                    tmpconfig.set(h[0], h[1], 'xXX[REMOVED]XXx')
            except (configparser.NoSectionError,
                    configparser.NoOptionError) as e:
                pass

        extra_newznabs = list(
            zip(*
                [iter(tmpconfig.get('Newznab', 'extra_newznabs').split(', '))
                 ] * 7))
        extra_torznabs = list(
            zip(*
                [iter(tmpconfig.get('Torznab', 'extra_torznabs').split(', '))
                 ] * 7))
        cleaned_newznabs = []
        cleaned_torznabs = []
        for ens in extra_newznabs:
            n_host = None
            n_uid = None
            n_api = None
            if ens[1] is not None:
                n_host = 'xXX[REMOVED]XXx'
            if ens[3] is not None:
                nzkey = ens[3]
                if nzkey[:5] == '^~$z$':
                    nz = encrypted.Encryptor(nzkey)
                    nz_stat = nz.decrypt_it()
                    if nz_stat['status'] is True:
                        nzkey = nz_stat['password']
                if nzkey not in self.keylist:
                    self.keylist.append(nzkey)
                n_api = 'xXX[REMOVED]XXx'
            if ens[4] is not None:
                n_uid = 'xXX[REMOVED]XXx'
            newnewzline = (ens[0], n_host, ens[2], n_api, n_uid, ens[5],
                           ens[6])
            cleaned_newznabs.append(newnewzline)

        for ets in extra_torznabs:
            logger.info('torznab: %s' % (ets, ))
            n_host = None
            n_uid = None
            n_api = None
            if ets[1] is not None:
                n_host = 'xXX[REMOVED]XXx'
            if ets[3] is not None:
                tzkey = ets[3]
                if tzkey[:5] == '^~$z$':
                    tz = encrypted.Encryptor(tzkey)
                    tz_stat = tz.decrypt_it()
                    if tz_stat['status'] is True:
                        tzkey = tz_stat['password']
                if tzkey not in self.keylist:
                    self.keylist.append(tzkey)
                n_api = 'xXX[REMOVED]XXx'
            if ets[4] is not None:
                n_uid = 'xXX[REMOVED]XXx'
            # use the scrubbed n_uid (ets[4] would leave the uid unredacted)
            newtorline = (ets[0], n_host, ets[2], n_api, n_uid, ets[5],
                          ets[6])
            cleaned_torznabs.append(newtorline)

        tmpconfig.set('Newznab', 'extra_newznabs',
                      ', '.join(self.write_extras(cleaned_newznabs)))
        tmpconfig.set('Torznab', 'extra_torznabs',
                      ', '.join(self.write_extras(cleaned_torznabs)))
        try:
            with codecs.open(self.cleanpath, encoding='utf8',
                             mode='w+') as tmp_configfile:
                tmpconfig.write(tmp_configfile)
            logger.fdebug(
                'Configuration cleaned of keys/passwords and written to temporary location.'
            )
        except IOError as e:
            logger.warn("Error writing configuration file: %s" % e)
Example #5
        "pythonpath/lightproof___implname__.py" ]:
        z.writestr(
            i.replace("__implname__", a["implname"]),
            Template(open(i, "r", encoding="utf-8").read()).safe_substitute(a))

    for i in a["extras"].split(","):
        z.writestr(i.strip().replace("../", "").replace("__implname__", a["implname"]), \
        open(fn[:fn.rfind("/")+1] + i.strip(), 'rb').read())

    try:
        d = open(fn + ".dlg", "r", encoding="utf-8").readlines()
        Dialog.c(a["implname"], d, z, a["lang"])
    except:
        z.writestr("pythonpath/lightproof_opts_%s.py" % a["implname"], "")


if len(sys.argv) == 1:
    print("""Synopsis: python make.py config_file
eg. python make.py src/en/en.cfg""")
    sys.exit(0)

fArgs = cp.SafeConfigParser()
for i in sys.argv[1:]:
    try:
        fArgs.read(i)
        dist(i[:-4], fArgs._sections['args'])
    except:
        print(traceback.format_exc())
        print("missing config file or options: " + str(i))
        sys.exit(0)
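fArgs._sections['args'] reaches into a private attribute. A sketch of the public equivalent (note that items() also folds in DEFAULT-section values, which _sections does not):

import configparser

fArgs = configparser.ConfigParser()
fArgs.read('src/en/en.cfg')  # example path taken from the synopsis above
args = dict(fArgs.items('args'))  # public API in place of fArgs._sections['args']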
Example #6


if __name__ == "__main__":

    locations = [
        os.path.join(os.path.realpath(os.path.dirname(__file__)), ".sync"),
        os.path.join(os.environ['HOME'], ".sync")
    ]

    for confpath in locations:
        if os.path.exists(confpath):
            config = configparser.SafeConfigParser({
                'host': HOST,
                'port': PORT
            })
            config.read(confpath)
            HOST = config.get("INTERFACE", 'host')
            PORT = config.getint("INTERFACE", 'port')
            print("[sync] configuration file loaded %s:%s" % (HOST, PORT))
            break

    sync = Sync()
    Syncoff(sync)
    Cmt(sync)
    Rcmt(sync)
    Fcmt(sync)
    Bc(sync)
    Translate(sync)
    Cmd(sync)
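The defaults dict passed to SafeConfigParser above supplies fallback values whenever the .sync file omits an option. A self-contained sketch of the same pattern, with hypothetical values:

import configparser

# defaults land in the DEFAULT section and back every get()/getint()
config = configparser.ConfigParser({'host': 'localhost', 'port': '9100'})
config.read_string("[INTERFACE]\nhost = 10.0.0.5\n")  # the file sets host only
print(config.get('INTERFACE', 'host'))     # 10.0.0.5 (from the file)
print(config.getint('INTERFACE', 'port'))  # 9100 (from the defaults)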
Example #7
def _read_conf(file_path):
    conf = configparser.SafeConfigParser()
    conf.optionxform = str
    with codecs.open(file_path, 'r', encoding='utf-8') as fp:
        conf.readfp(fp)
    return parse_config(conf)
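Assigning conf.optionxform = str disables configparser's default lower-casing of option names, which matters when keys are case-sensitive. A quick demonstration:

import configparser

text = "[Section]\nApiKey = secret\n"

lowered = configparser.ConfigParser()
lowered.read_string(text)
print(lowered.options('Section'))  # ['apikey'] -- names lower-cased by default

preserved = configparser.ConfigParser()
preserved.optionxform = str  # keep option names exactly as written
preserved.read_string(text)
print(preserved.options('Section'))  # ['ApiKey']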
Example #8
def get(ctx, rtems_major_version):
    global _version
    global _date
    global _released
    version = _version
    date = _date
    released = _released
    if _version == 'invalid':
        version = rtems_major_version
        date = _date
        released = False
        #
        # Is there a VERSION file for a release or deployed source.
        #
        vc = 'VERSION'
        if os.path.exists(vc):
            try:
                import configparser
            except ImportError:
                import ConfigParser as configparser
            v = configparser.SafeConfigParser()
            try:
                v.read(vc)
            except Exception as e:
                ctx.fatal('Invalid version config format: %s: %s' % (vc, e))
            try:
                version = v.get('version', 'revision')
                date = v.get('version', 'date')
            except Exception as e:
                ctx.fatal('Invalid version file: %s: %s' % (vc, e))
            if 'not_released' not in version:
                _released = True
        else:
            #
            # Get date and version from Git
            #
            if ctx.exec_command(
                [ctx.env.GIT[0], 'diff-index', '--quiet', 'HEAD']) == 0:
                modified = ''
            else:
                modified = '-modified'
            out = ctx.cmd_and_log([
                ctx.env.GIT[0], 'log', '-1', '--format=%h,%cd', '--date=short'
            ],
                                  quiet=True)
            f = out.strip('\n').split(',')
            version = version + '.' + f[0] + modified
            date = _pretty_day(ctx, f[1])
        _version = version
        _date = date
        _released = released  # fixed typo: was '_release'
    if version != 'invalid':
        vs = _version.split('.')
        _major = vs[0]
        if len(vs) == 3:
            _minor = vs[1]
            _revision = vs[2]
        elif len(vs) == 2:
            _minor = 0
            _revision = vs[1]
        else:
            ctx.fatal('Invalid version format: %s' % (_version))
    return version, date, released
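The VERSION file read here is expected to carry a [version] section with revision and date options. A hypothetical example of such a file (values invented), exercised through the same parser:

import configparser

v = configparser.ConfigParser()
v.read_string("""
[version]
revision = 6.1-not_released
date = 2023-01-01
""")
print(v.get('version', 'revision'))  # 6.1-not_released -> treated as unreleased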
Example #9
def userProcedure(reqId):
    """Respond to the requests."""
    libdp.log("userProcedure")
    reqParams = configparser.SafeConfigParser()
    reqParams.read(libdp.wd(reqId, "public") + config.reqInfo)

    preparameters = reqParams.get('config', 'preProcParameters').split()
    preresources = libdp.parseToDic(reqParams.get('config', 'preResources'))

    numOutput = int(reqParams.get('config', 'numOutput'))

    postparameters = reqParams.get('config', 'postProcParameters').split()
    postresources = libdp.parseToDic(reqParams.get('config', 'postResources'))

    # Public Policy
    sys.path.append(config.publicPolicy)
    publicpolicy = __import__(config.publicPolicy)
    if publicpolicy.policy(reqId=reqId):
        libdp.updateTotalCost(config.totalCostStorage,
                              libdp.measureRisk(reqId))
        os.makedirs(libdp.wd(reqId, "private") + config.sensitiveData)
        shutil.copyfile(config.sensitiveData + 'data.ini',
                        libdp.wd(reqId, "private") + config.sensitiveData + 'data.ini')

        # insecureSandbox(reqId=reqId,
        #          program=libdp.wd(reqId, "public") + config.preProc,
        #          inputFile=libdp.wd(reqId, "private") + config.sensitiveData,
        #          outputFile=libdp.wd(reqId, "private") + config.preProcResult,
        #          extraParams=preparameters,
        #          resources=preresources)

        secureSandbox(reqId=reqId,
                      program=config.preProc,
                      inputFile=config.sensitiveData,
                      outputFile=config.preProcResult,
                      extraParams=preparameters,
                      resources=preresources)

        # Private Policy
        sys.path.append(config.privatePolicy)
        privatepolicy = __import__(config.privatePolicy)
        if not privatepolicy.policy(reqId=reqId):
            # TODO: replace with dummy value
            print ("xxxxx")

        # insecureSandbox(reqId=reqId,
        #         program=os.getcwd() + "/" + config.Clamp + ".py",
        #         inputFile=libdp.wd(reqId, "private") + config.preProcResult,
        #         outputFile=libdp.wd(reqId, "private") + config.randomizedResult,
        #         extraParams=[libdp.wd(reqId, "public"), str(numOutput)],
        #         resources={'timeout': 1, 'cpu': 1,}
        #         )

        shutil.copyfile(config.Clamp + '.py',
                        libdp.wd(reqId, "public") + config.Clamp + '.py')
        secureSandbox(reqId=reqId,
                      program=config.Clamp + ".py",
                      inputFile=config.preProcResult,
                      outputFile=config.randomizedResult,
                      extraParams=["/scripts/", str(numOutput)],
                      resources={'timeout': 1, 'cpu': 1})

        # insecureSandbox(reqId=reqId,
        #         program=libdp.wd(reqId, "public") + config.postProc,
        #         inputFile=libdp.wd(reqId, "private") + config.randomizedResult,
        #         outputFile=libdp.wd(reqId, "private") + config.postprocResult,
        #         extraParams=postparameters,
        #         resources=postresources)

        secureSandbox(reqId=reqId,
                      program=config.postProc,
                      inputFile=config.randomizedResult,
                      outputFile=config.postprocResult,
                      extraParams=postparameters,
                      resources=postresources)

        return True
    else:
        # run out of budget
        libdp.errorXml(libdp.wd(reqId, "private") + config.postprocResult,
                       "Policy denies the request")
        return False
Example #10
    def __init__(self,argv):
        ''' Defines the arguments used in several scripts of this project.
        It reads them from a config file
        and also add the arguments found in command line.

        If an argument exists in both, the value in the command line overwrites
        the value in the config file
        '''

        # Do the argv default this way, since a default in the function
        # declaration is evaluated once, at definition time.
        if argv is None:
            argv = sys.argv

        # Parse any conf_file specification
        # We make this parser with add_help=False so that
        # it doesn't parse -h and print help.
        conf_parser = argparse.ArgumentParser(
            description=__doc__, # printed with -h/--help
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
            # Turn off help, so we print all options in response to -h
            add_help=False
            )
        conf_parser.add_argument("-c", "--conf_file",
                            help="Specify config file", metavar="FILE")
        args, self.remaining_argv = conf_parser.parse_known_args()

        defaults = {}

        if args.conf_file:
            config = configparser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("default")))

        # Parse rest of arguments
        # Don't suppress add_help here so it will handle -h
        self.parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser]
            )
        self.parser.set_defaults(**defaults)

        # Training settings
        #parser = argparse.ArgumentParser(description='PyTorch MNIST Example')

        self.parser.add_argument('--stop_crit', type=float, metavar='M',
                            help='The threshold value under which the training stops')
        self.parser.add_argument('--epochs', type=int, metavar='N',
                            help='number of epochs to train')

        self.parser.add_argument('--lr', type=str2FloatList,metavar='LR',
                            help='learning rate (it can be a schedule : --lr 0.01,0.001,0.0001)')
        self.parser.add_argument('--num_workers', type=int,metavar='NUMWORKERS',
                            help='the number of processes to load the data. num_workers equal 0 means that it’s \
                            the main process that will do the data loading when needed, num_workers equal 1 is\
                            the same as any n, but you’ll only have a single worker, so it might be slow')
        self.parser.add_argument('--momentum', type=float, metavar='M',
                            help='SGD momentum')
        self.parser.add_argument('--seed', type=int, metavar='S',
                            help='Seed used to initialise the random number generator.')
        self.parser.add_argument('--log_interval', type=int, metavar='N',
                            help='how many epochs to train before logging training status')

        self.parser.add_argument('--ind_id', type=int, metavar='IND_ID',
                            help='the id of the individual')
        self.parser.add_argument('--exp_id', type=str, metavar='EXP_ID',
                            help='the id of the experiment')
        self.parser.add_argument('--dataset', type=str, metavar='N',help='the dataset to use. Can be \'NETFLIX\', \'IRCCYN\' or \'VQEG\'.')

        self.parser.add_argument('--cuda', type=str2bool, metavar='S',
                            help='To run computations on the gpu')
        self.parser.add_argument('--optim', type=str, metavar='OPTIM',
                            help='the optimizer to use (default: \'GD\')')

        self.parser.add_argument('--start_mode', type=str,metavar='SM',
                    help='The mode to use to initialise the model. Can be \'base_init\', \'iter_init\' or \'fine_tune\'.')

        self.parser.add_argument('--true_scores_init', type=str,metavar='SM',
                    help='The function name to use to init the true scores when using approximate init. Can only be \'base\'')
        self.parser.add_argument('--bias_init', type=str,metavar='SM',
                    help='The function name to use to init the biases when using approximate init. Can only be \'base\'')
        self.parser.add_argument('--diffs_init', type=str,metavar='SM',
                    help='The function name to use to init the difficulties when using approximate init. Can only be \'base\'')
        self.parser.add_argument('--incons_init', type=str,metavar='SM',
                    help='The function name to use to init the inconsistencies when using approximate init. Can only be \'base\' or \'use_diffs\'.')

        self.parser.add_argument('--train_mode', type=str,metavar='TM',
                    help='The mode to use to train the model. Can be \'joint\' or \'alternate\'.')
        self.parser.add_argument('--alt_epoch_nb', type=int,metavar='TM',
                    help='The number of epoch during which train each parameter. Ignored if using \'joint\' training mode.')

        self.parser.add_argument('--noise', type=float, metavar='NOISE',
                    help='the amount of noise to add in the gradient of the model (as a percentage of the norm)(default: 0.1)')

        self.parser.add_argument('--prior', type=str,metavar='S',\
                        help='The prior to use. Can be \'uniform\' or \'oracle\'.')
        self.parser.add_argument('--prior_weight', type=float,metavar='S',\
                        help='The weight of the prior term in the loss function')

        self.parser.add_argument('--param_to_opti', type=strToStrList,metavar='V',
                            help="The parameters to optimise. Can be a list with elements among 'bias','incons','diffs','trueScores'")
        self.parser.add_argument('--param_not_gt',type=strToStrList,metavar='V',
                            help="The parameters to set to ground truth when not starting the training with a pre-trained net \
                                (i.e. choosing option 'init' for --start_mode). Can be a list (possibly empty) with elements among 'bias','incons','diffs','trueScores'")

        self.parser.add_argument('--note', type=str,metavar='NOTE',
                            help="A note on the model")

        self.parser.add_argument('--score_dis', type=str, metavar='S',
                            help='The distribution to use to model the scores')

        self.parser.add_argument('--score_min', type=int, metavar='S',
                            help='The minimum score that can be given by an annotator')
        self.parser.add_argument('--score_max', type=int, metavar='S',
                            help='The maximum score that can be given by an annotator')
        self.parser.add_argument('--div_beta_var', type=float, metavar='S',
                            help='The coefficient with which to rescale down the variances (difficulties and inconsistencies) \
                            sampled from the beta distribution')

        self.parser.add_argument('--prior_update_frequ', type=int, metavar='S',
                            help='The number of epoch to wait before updating the empirical prior. Ignored if other prior is used.')

        self.parser.add_argument('--extr_sco_dep', type=str2bool, metavar='S',
                            help='Whether or not to add a dependency between the variance and the mean of videos. If true, raw score variance of videos with very high or very low scores\
                            will be lower.')

        self.parser.add_argument('--truescores_tanh', type=str2bool, metavar='S',
                            help='To pass the true scores through a tanh during optimisation.')

        self.parser.add_argument('--bias_tanh', type=str2bool, metavar='S',
                            help='To pass the bias through a tanh during optimisation.')
        self.parser.add_argument('--bias_ampl', metavar='STD',type=float,help='The amplitude of the bias gaussian distribution. Ignored if bias are sampled from a normal distribution')

        self.parser.add_argument('--bias_dis', metavar='STD',type=str,default='Beta',help='The bias distribution. Can be \'Beta\' or \'Normal\'')

        self.args = None
Example #11
# Read config
if "RABBIT_HOST" not in os.environ:
    os.environ["RABBIT_HOST"] = "localhost"

# Find config in different locations
config_file = None
if os.path.isfile('config.ini'):
    config_file = 'config.ini'
elif os.path.isfile('../config.ini'):
    config_file = '../config.ini'
elif os.path.isfile('../tracing/config.ini'):
    config_file = '../tracing/config.ini'
else:
    raise Exception('config.ini not found')

config = configparser.SafeConfigParser(os.environ)
config.read(config_file)

# Check that urls_file is defined and exists
urls_file = config.get('scheduler', 'urls_file')
print(urls_file)

if not urls_file:
    raise Exception('Set "urls_file" in config.ini')

if not os.path.isfile(urls_file):
    raise Exception("Can't open file '{}'".format(urls_file))

# Create RabbitMQ connection
rabbitmq_host = config.get('rabbitmq', 'host', fallback='localhost', raw=False)
rabbitmq_queue = config.get('rabbitmq', 'queue', fallback='trace_tasks')
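Passing os.environ as the defaults dict lets the ini file reference environment variables through %(NAME)s interpolation, which is how RABBIT_HOST can appear inside config.ini above. A minimal sketch:

import configparser
import os

os.environ.setdefault('RABBIT_HOST', 'localhost')

config = configparser.ConfigParser(os.environ)
config.read_string("""
[rabbitmq]
host = %(RABBIT_HOST)s
queue = trace_tasks
""")
print(config.get('rabbitmq', 'host'))  # expanded from the environment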
Example #12
    def parse_args(self, argv=None):
        conf_parser = argparse.ArgumentParser(add_help=False)

        # We have two parsers so we have correct --help, we need -c in both
        conf_parser.add_argument("-c",
                                 "--config-file",
                                 help="Configuration File",
                                 metavar="FILE")

        args, remaining_args = conf_parser.parse_known_args(argv)
        defaults = {}
        config = configparser.SafeConfigParser()
        config.read([os.path.expanduser("~/.op-test-framework.conf")])
        if args.config_file:
            if os.access(args.config_file, os.R_OK):
                config.read([args.config_file])
            else:
                raise OSError(errno.ENOENT, os.strerror(errno.ENOENT),
                              args.config_file)
        try:
            defaults = dict(config.items('op-test'))
        except configparser.NoSectionError:
            pass

        parser = get_parser()
        parser.set_defaults(**defaults)

        if defaults.get('qemu_binary'):
            qemu_default = defaults['qemu_binary']

        if defaults.get('mambo_binary'):
            mambo_default = defaults['mambo_binary']
        if defaults.get('mambo_initial_run_script'):
            mambo_default = defaults['mambo_initial_run_script']

        parser.add_argument(
            "--check-ssh-keys",
            action='store_true',
            default=False,
            help="Check remote host keys when using SSH (auto-yes on new)")
        parser.add_argument("--known-hosts-file",
                            help="Specify a custom known_hosts file")

        self.args, self.remaining_args = parser.parse_known_args(
            remaining_args)

        args_dict = vars(self.args)

        # if we have a bmc_type we start with appropriate template
        if args_dict.get('bmc_type') is not None:
            dict_merge = default_templates.get(
                args_dict.get('bmc_type').lower())
            if dict_merge is not None:
                # overlays dict merge on top of default_val
                default_val.update(dict_merge)

        for key in default_val:
            if args_dict.get(key) is None:
                args_dict[key] = default_val[key]

        stateMap = {
            'UNKNOWN': common.OpTestSystem.OpSystemState.UNKNOWN,
            'UNKNOWN_BAD': common.OpTestSystem.OpSystemState.UNKNOWN_BAD,
            'OFF': common.OpTestSystem.OpSystemState.OFF,
            'PETITBOOT': common.OpTestSystem.OpSystemState.PETITBOOT,
            'PETITBOOT_SHELL':
            common.OpTestSystem.OpSystemState.PETITBOOT_SHELL,
            'OS': common.OpTestSystem.OpSystemState.OS
        }

        # Some quick sanity checking
        if self.args.known_hosts_file and not self.args.check_ssh_keys:
            parser.error("--known-hosts-file requires --check-ssh-keys")

        # Setup some defaults for the output options
        # Order of precedence
        # 1. cmdline arg
        # 2. env variable
        # 3. default path
        if (self.args.output):
            outdir = self.args.output
        elif ("OP_TEST_OUTPUT" in os.environ):
            outdir = os.environ["OP_TEST_OUTPUT"]
        else:
            outdir = os.path.join(self.basedir, "test-reports")

        self.outsuffix = "test-run-%s" % self.get_suffix()
        outdir = os.path.join(outdir, self.outsuffix)

        # Normalize the path to fully qualified and create if not there
        self.output = os.path.abspath(outdir)
        if (not os.path.exists(self.output)):
            os.makedirs(self.output)

        if (self.args.logdir):
            logdir = self.args.logdir
        elif ("OP_TEST_LOGDIR" in os.environ):
            logdir = os.environ["OP_TEST_LOGDIR"]
        else:
            logdir = self.output

        self.logdir = os.path.abspath(logdir)
        if (not os.path.exists(self.logdir)):
            os.makedirs(self.logdir)

        print(("Logs in: {}".format(self.logdir)))

        OpTestLogger.optest_logger_glob.logdir = self.logdir

        # Grab the suffix, if not given use current time
        self.outsuffix = self.get_suffix()

        # set up where all the logs go
        logfile = os.path.join(self.output, "%s.log" % self.outsuffix)

        logcmd = "tee %s" % (logfile)
        # we use 'cat -v' to convert control characters
        # to something that won't affect the user's terminal
        if self.args.quiet:
            logcmd = logcmd + "> /dev/null"
            # save sh_level for later refresh loggers
            OpTestLogger.optest_logger_glob.sh_level = logging.ERROR
            OpTestLogger.optest_logger_glob.sh.setLevel(logging.ERROR)
        else:
            logcmd = logcmd + "| sed -u -e 's/\\r$//g'|cat -v"
            # save sh_level for later refresh loggers
            OpTestLogger.optest_logger_glob.sh_level = logging.INFO
            OpTestLogger.optest_logger_glob.sh.setLevel(logging.INFO)

        OpTestLogger.optest_logger_glob.setUpLoggerFile(
            datetime.utcnow().strftime("%Y%m%d%H%M%S%f") + '.main.log')
        OpTestLogger.optest_logger_glob.setUpLoggerDebugFile(
            datetime.utcnow().strftime("%Y%m%d%H%M%S%f") + '.debug.log')
        OpTestLogger.optest_logger_glob.optest_logger.info(
            'TestCase Log files: {}/*'.format(self.output))
        OpTestLogger.optest_logger_glob.optest_logger.info(
            'StreamHandler setup {}'.format(
                'quiet' if self.args.quiet else 'normal'))

        self.logfile_proc = subprocess.Popen(logcmd,
                                             stdin=subprocess.PIPE,
                                             stderr=sys.stderr,
                                             stdout=sys.stdout,
                                             shell=True,
                                             universal_newlines=True,
                                             encoding='utf-8')
        self.logfile = self.logfile_proc.stdin

        # we have enough setup to allow
        # signal handler cleanup to run
        self.signal_ready = True
        # atexit viable for cleanup to run
        self.atexit_ready = True
        # now that we have loggers, dump conf file to help debug later
        OpTestLogger.optest_logger_glob.optest_logger.debug(
            "conf file defaults={}".format(defaults))
        cmd = "git describe --always"
        try:
            git_output = subprocess.check_output(cmd.split())
            # log for triage of how dated the repo is
            OpTestLogger.optest_logger_glob.optest_logger.debug(
                "op-test-framework git level = {}".format(git_output))
        except Exception as e:
            OpTestLogger.optest_logger_glob.optest_logger.debug(
                "Unable to get git describe")

        # MTU Check here post handler and logging setup
        try:
            self.util.PingMTUCheck(
                self.args.bmc_ip,
                totalSleepTime=BMC_CONST.PING_RETRY_FOR_STABILITY)
        except Exception as e:
            OpTestLogger.optest_logger_glob.optest_logger.warning(
                "Check that the BMC is healthy, maybe the Broadcom bug, Exception={}"
                .format(e))

        # setup AES and Hostlocker configs after the logging is setup
        locker_timeout = time.time() + 60 * self.args.locker_wait
        locker_code = errno.ETIME  # 62
        locker_message = ("OpTestSystem waited {} minutes but was unable"
                          " to lock environment/host requested,"
                          " either pick another environment/host or increase "
                          "--locker-wait, try --aes q with options for "
                          "--aes-search-args to view availability, or as"
                          " appropriate for your hostlocker".format(
                              self.args.locker_wait))
        locker_exit_exception = OpExit(message=locker_message,
                                       code=locker_code)
        while True:
            try:
                rollup_flag = False
                self.util.check_lockers()
                break
            except Exception as e:
                OpTestLogger.optest_logger_glob.optest_logger.debug(
                    "locker_wait Exception={}".format(e))
                if "unable to lock" in e.message:
                    self.aes_print_helpers = False
                    # SystemExit exception needs message to print
                    rollup_message = locker_exit_exception.message
                    rollup_exception = locker_exit_exception
                else:
                    rollup_message = str(e)
                    rollup_exception = e
                    rollup_flag = True  # bubble exception out
                if time.time() > locker_timeout or rollup_flag:
                    # if not "unable to lock" we bubble up underlying exception
                    OpTestLogger.optest_logger_glob.optest_logger.warning(
                        "{}".format(rollup_message))
                    raise rollup_exception
                else:
                    OpTestLogger.optest_logger_glob.optest_logger.info(
                        "OpTestSystem waiting for requested environment/host"
                        " total time to wait is {} minutes, we will check"
                        " every minute".format(self.args.locker_wait))
                    time.sleep(60)

        if self.args.machine_state is None:
            if self.args.bmc_type in ['qemu', 'mambo']:
                # Force UNKNOWN_BAD so that we don't try to setup the console early
                self.startState = common.OpTestSystem.OpSystemState.UNKNOWN_BAD
            else:
                self.startState = common.OpTestSystem.OpSystemState.UNKNOWN
        else:
            self.startState = stateMap[self.args.machine_state]
        return self.args, self.remaining_args
Example #13
def prepdata(argv):
    """
    Info
    ----
    Delineate basins in the specified area

    Clip globally gridded distributed DEM, Hydrography and River Width data.

    Two parameters are required in this program: 1) Threshold and 2) Extent.
    Threshold defines the river network in km2; for example, 5000 will delineate
    a river network with an upslope area larger than 5000 km2. Extent defines the
    clipped area and takes four values: xmin, ymin, xmax and ymax.


    Dependencies
    ------------
    TAUDEM : streamnet, gagewatershed
    GDAL : gdalwarp
    Pandas, Numpy, GDAL Python API, gdalutils, lfptools.prepdata_utils


    Inputs
    ------
    te : Clipping box specified as xmin,ymin,xmax,ymax (no space after commas!)
    out : Output folder
    dem : Any GDAL format (e.g. .tif, .vrt) containing DEM info
    acc : Any GDAL format (e.g. .tif, .vrt) containing accumulation info
    dir : Any GDAL format (e.g. .tif, .vrt) containing flow direction info
    thresh : Threshold to mask flow accumulation in KM**2
    streamnet : Calculate tree and coord files <yes/no>


    Outputs (If running at 30s)
    ---------------------------
    acc30.tif
    acc30_.tif
    area30.tif
    basins30.tif
    basins30d4.tif
    dem3.tif
    dir30.tif
    dir30tau.tif
    dir30tau_mask.tif
    dir30tau_maskd4.tif
    dir30taud4.tif
    net30.tif
    net30d4.tif
    out30.shp
    out30.tif
    out30d4.shp
    out30d4.tif
    stren_net30d4.out (folder)
    stren_net30d8.out (folder)
    stren_w30d4.tif
    stren_w30d8.tif
    strn_coord30d4.txt
    strn_coord30d8.txt
    strn_ord30d4.tif
    strn_ord30d8.tif
    strn_tree30d4.txt
    strn_tree30d8.txt
    """

    opts, args = getopt.getopt(argv, "i:")
    for o, a in opts:
        if o == "-i":
            inifile = a

    # defaults must be strings, since they are compared with .lower() below
    config = configparser.SafeConfigParser({
        'overwrite': 'False',
        'acc_area': 'False'
    })
    config.read(inifile)

    te = np.float64(config.get('prepdata', 'te').split(','))
    out = str(config.get('prepdata', 'out'))
    _dem = str(config.get('prepdata', 'dem'))
    _acc = str(config.get('prepdata', 'acc'))
    _dir = str(config.get('prepdata', 'dir'))
    nproc = str(config.get('prepdata', 'nproc'))
    thresh = np.float64(config.get('prepdata', 'thresh'))
    streamnet = str(config.get('prepdata', 'streamnet'))
    overwrite = config.get('prepdata', 'overwrite').lower() == 'true'
    acc_area = config.get('prepdata', 'acc_area').lower() == 'true'

    # Defining extent
    xmin0 = te[0]
    ymin0 = te[1]
    xmax0 = te[2]
    ymax0 = te[3]

    # Getting resolution in degrees
    geo = gdalutils.get_geo(_acc)
    deg = round(geo[6], 4)

    # Checking resolution in arc-sec
    if deg == 0.0083:
        res = 30
    elif deg == 0.0008:
        res = 3
    else:
        # res would otherwise be undefined further down
        raise ValueError('Unsupported resolution: %s degrees' % deg)

    # Creating an output folder
    try:
        os.makedirs(out)
    except OSError:
        if not os.path.isdir(out):
            raise

    # List of files generated
    dem3tif = out + '/dem3.tif'
    dir3tif = out + '/dir3.tif'

    dir3tau = out + '/dir3tau.tif'
    dir3taud4 = out + '/dir3taud4.tif'
    dir3tau_mask = out + '/dir3tau_mask.tif'
    dir3tau_maskd4 = out + '/dir3tau_maskd4.tif'
    dir30tif = out + '/dir30.tif'
    dir30tau = out + '/dir30tau.tif'
    dir30taud4 = out + '/dir30taud4.tif'
    dir30tau_mask = out + '/dir30tau_mask.tif'
    dir30tau_maskd4 = out + '/dir30tau_maskd4.tif'

    _acc3tif = out + '/acc3_.tif'
    acc3tif = out + '/acc3.tif'
    _acc30tif = out + '/acc30_.tif'
    acc30tif = out + '/acc30.tif'
    # _acc3tif is accumulation in grid cells, acc3tif is accumulation in area.
    # If the input acc is already an area, override _acc3tif with the area
    # version (and then don't multiply by area).
    if acc_area:
        _acc3tif = acc3tif
        _acc30tif = acc30tif

    net3tif = out + '/net3.tif'
    net30tif = out + '/net30.tif'
    net3tifd4 = out + '/net3d4.tif'
    net30tifd4 = out + '/net30d4.tif'

    strn_ord3d8 = out + '/strn_ord3d8.tif'
    strn_tree3d8 = out + '/strn_tree3d8.txt'
    strn_coord3d8 = out + '/strn_coord3d8.txt'
    stren_net3d8 = out + '/stren_net3d8.out'
    stren_w3d8 = out + '/stren_w3d8.tif'
    strn_ord3d4 = out + '/strn_ord3d4.tif'
    strn_tree3d4 = out + '/strn_tree3d4.txt'
    strn_coord3d4 = out + '/strn_coord3d4.txt'
    stren_net3d4 = out + '/stren_net3d4.out'
    stren_w3d4 = out + '/stren_w3d4.tif'
    strn_ord30d8 = out + '/strn_ord30d8.tif'
    strn_tree30d8 = out + '/strn_tree30d8.txt'
    strn_coord30d8 = out + '/strn_coord30d8.txt'
    stren_net30d8 = out + '/stren_net30d8.out'
    stren_w30d8 = out + '/stren_w30d8.tif'
    strn_ord30d4 = out + '/strn_ord30d4.tif'
    strn_tree30d4 = out + '/strn_tree30d4.txt'
    strn_coord30d4 = out + '/strn_coord30d4.txt'
    stren_net30d4 = out + '/stren_net30d4.out'
    stren_w30d4 = out + '/stren_w30d4.tif'

    out3shp = out + '/out3.shp'
    out3shpd4 = out + '/out3d4.shp'
    out30shp = out + '/out30.shp'
    out30shpd4 = out + '/out30d4.shp'

    cat3tif = out + '/basins3.tif'
    cat3tifd4 = out + '/basins3d4.tif'
    cat30tif = out + '/basins30.tif'
    cat30tifd4 = out + '/basins30d4.tif'

    are3tif = out + '/area3.tif'
    are30tif = out + '/area30.tif'

    # Snap extent to match input tif grid cells
    geo = gdalutils.get_geo(_dem)
    # Geo has format [xmin, ymin, xmax, ymax, xn, yn, xres, yres, ....]
    xmin = geo[0] + np.floor((xmin0 - geo[0]) / geo[6]) * geo[6]
    ymin = geo[1] + np.floor((ymin0 - geo[1]) / geo[7]) * geo[7]
    xmax = geo[2] + np.floor((xmax0 - geo[2]) / geo[6]) * geo[6]
    ymax = geo[3] + np.floor((ymax0 - geo[3]) / geo[7]) * geo[7]

    # Clipping DEM .vrt files
    if not os.path.exists(dem3tif) or overwrite:
        print('clipping dem to region', xmin, ymin, xmax, ymax)
        subprocess.call([
            "gdalwarp", "-ot", "Float32", "-te",
            str(xmin),
            str(ymin),
            str(xmax),
            str(ymax), "-overwrite", "-dstnodata", "-9999", "-co",
            'COMPRESS=DEFLATE', "-co", "BIGTIFF=YES", _dem, dem3tif
        ])

    ########################################################################################
    # 3s resolution case
    #
    if res == 3:

        # Snap extent to match input tif grid cells
        geo = gdalutils.get_geo(_dir)
        # Geo has format [xmin, ymin, xmax, ymax, xn, yn, xres, yres, ....]
        xmin = geo[0] + np.floor((xmin0 - geo[0]) / geo[6]) * geo[6]
        ymin = geo[1] + np.floor((ymin0 - geo[1]) / geo[7]) * geo[7]
        xmax = geo[2] + np.floor((xmax0 - geo[2]) / geo[6]) * geo[6]
        ymax = geo[3] + np.floor((ymax0 - geo[3]) / geo[7]) * geo[7]
        print('clipping dir and acc fields to region', xmin, ymin, xmax, ymax)

        if not os.path.exists(dir3tif) or overwrite:
            subprocess.call([
                "gdalwarp", "-te",
                str(xmin),
                str(ymin),
                str(xmax),
                str(ymax), "-overwrite", "-co", "BIGTIFF=YES", "-co",
                'COMPRESS=DEFLATE', _dir, dir3tif
            ])

        if not os.path.exists(_acc3tif) or overwrite:
            subprocess.call([
                "gdalwarp", "-te",
                str(xmin),
                str(ymin),
                str(xmax),
                str(ymax), "-overwrite", "-co", "BIGTIFF=YES", "-co",
                'COMPRESS=DEFLATE', _acc, _acc3tif
            ])

        if not os.path.exists(dir3tau) or overwrite:
            print("converting directions into TAUDEM directions...")
            directions_tau(dir3tif, dir3tau)

        if not os.path.exists(are3tif) or overwrite:
            print("calculating area in extent...")
            calculate_area(dir3tau, are3tif)

        if not acc_area and (not os.path.exists(acc3tif) or overwrite):
            print("getting flow accumulation in km2...")
            multiply_rasters(_acc3tif, are3tif, acc3tif)

        if not os.path.exists(net3tif) or overwrite:
            print("thresholding accumulation to get river network...")
            rasterthreshold(acc3tif, thresh, 'Int16', net3tif)

        if not os.path.exists(dir3tau_mask) or overwrite:
            print("masking directions based on river network...")
            rastermask(dir3tau, net3tif, "Int16", dir3tau_mask)

        if not os.path.exists(out3shp) or overwrite:
            print("writing outlets and inland depressions in shapefile...")
            write_outlets(out3shp, dir3tau_mask)

        if not os.path.exists(cat3tif) or overwrite:
            print("writing basins file...")
            subprocess.call([
                "gagewatershed", "-p", dir3tau, "-gw", cat3tif, "-o", out3shp
            ])

        if streamnet == 'yes':
            # Streamnet fails if stren_net exists so remove first
            if os.path.exists(stren_net3d8) and overwrite:
                shutil.rmtree(stren_net3d8)
            if not os.path.exists(stren_net3d8):
                # PFU: input -fel = dem3tif for correct slope in output streamnet
                subprocess.call([
                    "mpiexec", "-n", nproc, "streamnet", "-fel", dem3tif, "-p",
                    dir3tau, "-ad8", acc3tif, "-src", net3tif, "-ord",
                    strn_ord3d8, "-tree", strn_tree3d8, "-coord",
                    strn_coord3d8, "-net", stren_net3d8, "-w", stren_w3d8,
                    "-o", out3shp
                ])

        if not os.path.exists(dir3tau_maskd4) or overwrite:
            print("creating D4 river network...")
            d82d4(dir3tau_mask, dir3tau_maskd4, net3tifd4)

        if not os.path.exists(out3shpd4) or overwrite:
            print("writing D4 outlets and inland depression in shapefile")
            write_outlets(out3shpd4, dir3tau_maskd4)

        if not os.path.exists(dir3taud4) or overwrite:
            print("create flow directions map D4...")
            create_dir_d4(dir3taud4, dir3tau, dir3tau_maskd4)

        if not os.path.exists(cat3tifd4) or overwrite:
            print("writing basins file D4...")
            subprocess.call([
                "gagewatershed", "-p", dir3taud4, "-gw", cat3tifd4, "-o",
                out3shpd4
            ])

        if streamnet == 'yes':
            # Streamnet fails if stren_net exists so remove first
            if os.path.exists(stren_net3d4) and overwrite:
                shutil.rmtree(stren_net3d4)
            if not os.path.exists(stren_net3d4):
                # PFU: input -fel = dem3tif for correct slope in output streamnet
                subprocess.call([
                    "mpiexec", "-n", nproc, "streamnet", "-fel", dem3tif, "-p",
                    dir3tau_maskd4, "-ad8", acc3tif, "-src", net3tifd4, "-ord",
                    strn_ord3d4, "-tree", strn_tree3d4, "-coord",
                    strn_coord3d4, "-net", stren_net3d4, "-w", stren_w3d4,
                    "-o", out3shpd4
                ])

    ########################################################################################
    # 30s resolution case
    #
    elif res == 30:
        # Snap extent to match input tif grid cells
        geo = gdalutils.get_geo(_dir)
        # Geo has format [xmin, ymin, xmax, ymax, xn, yn, xres, yres, ....]
        xmin = geo[0] + np.floor((xmin0 - geo[0]) / geo[6]) * geo[6]
        ymin = geo[1] + np.floor((ymin0 - geo[1]) / geo[7]) * geo[7]
        xmax = geo[2] + np.floor((xmax0 - geo[2]) / geo[6]) * geo[6]
        ymax = geo[3] + np.floor((ymax0 - geo[3]) / geo[7]) * geo[7]
        if not os.path.exists(dir30tif) or overwrite:
            subprocess.call([
                "gdalwarp", "-te",
                str(xmin),
                str(ymin),
                str(xmax),
                str(ymax), "-overwrite", _dir, dir30tif
            ])

        if not os.path.exists(_acc30tif) or overwrite:
            subprocess.call([
                "gdalwarp", "-te",
                str(xmin),
                str(ymin),
                str(xmax),
                str(ymax), "-overwrite", "-co", "BIGTIFF=YES", _acc, _acc30tif
            ])

        if not os.path.exists(dir30tau) or overwrite:
            print("converting directions into TAUDEM directions...")
            directions_tau(dir30tif, dir30tau)

        if not os.path.exists(are30tif) or overwrite:
            print("calculating area in extent...")
            calculate_area(dir30tau, are30tif)

        # fixed: 'acc30tiff' was a typo for the acc30tif defined above
        if not acc_area and (not os.path.exists(acc30tif) or overwrite):
            print("getting flow accumulation in km2...")
            multiply_rasters(_acc30tif, are30tif, acc30tif)

        if not os.path.exists(net30tif) or overwrite:
            print("thresholding accumulation to get river network...")
            rasterthreshold(acc30tif, thresh, 'Int16', net30tif)

        if not os.path.exists(dir30tau_mask) or overwrite:
            print("masking directions based on river network...")
            rastermask(dir30tau, net30tif, "Int16", dir30tau_mask)

        if not os.path.exists(out30shp) or overwrite:
            print("writing outlets and inland depressions in shapefile...")
            write_outlets(out30shp, dir30tau_mask)

        if not os.path.exists(cat30tif) or overwrite:
            print("writing basins file...")
            subprocess.call([
                "gagewatershed", "-p", dir30tau, "-gw", cat30tif, "-o",
                out30shp
            ])

        if streamnet == 'yes':
            # Streamnet fails if stren_net exists so remove first
            if os.path.exists(stren_net30d8) and overwrite:
                shutil.rmtree(stren_net30d8)
            if not os.path.exists(stren_net30d8):
                # PFU: input -fel should be the dem for correct slope in the
                # output streamnet, BUT we don't have a dem file at 30s
                subprocess.call([
                    "mpiexec", "-n", nproc, "streamnet", "-fel", net30tif,
                    "-p", dir30tau, "-ad8", acc30tif, "-src", net30tif, "-ord",
                    strn_ord30d8, "-tree", strn_tree30d8, "-coord",
                    strn_coord30d8, "-net", stren_net30d8, "-w", stren_w30d8,
                    "-o", out30shp
                ])

        if not os.path.exists(dir30tau_maskd4) or overwrite:
            print("creating D4 river network...")
            d82d4(dir30tau_mask, dir30tau_maskd4, net30tifd4)

        if not os.path.exists(out30shpd4) or overwrite:
            print("writing D4 outlets and inland depression in shapefile...")
            write_outlets(out30shpd4, dir30tau_maskd4)

        if not os.path.exists(dir30taud4) or overwrite:
            print("create flow directions map D4...")
            create_dir_d4(dir30taud4, dir30tau, dir30tau_maskd4)

        if not os.path.exists(cat30tifd4) or overwrite:
            print("writing basins file D4...")
            subprocess.call([
                "gagewatershed", "-p", dir30taud4, "-gw", cat30tifd4, "-o",
                out30shpd4
            ])

        if streamnet == 'yes':
            # Streamnet fails if stren_net exists so remove first
            if os.path.exists(stren_net30d4) and overwrite:
                shutil.rmtree(stren_net30d4)
            if not os.path.exists(stren_net30d4):
                # PFU: input -fel should be the dem for correct slope in the
                # output streamnet, BUT we don't have a dem file at 30s
                subprocess.call([
                    "mpiexec", "-n", nproc, "streamnet", "-fel", net30tifd4,
                    "-p", dir30tau_maskd4, "-ad8", acc30tif, "-src",
                    net30tifd4, "-ord", strn_ord30d4, "-tree", strn_tree30d4,
                    "-coord", strn_coord30d4, "-net", stren_net30d4, "-w",
                    stren_w30d4, "-o", out30shpd4
                ])
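A hypothetical ini file matching the config.get() calls at the top of this function (all values invented for illustration), exercised the same way:

import configparser

config = configparser.ConfigParser({'overwrite': 'False', 'acc_area': 'False'})
config.read_string("""
[prepdata]
te = -10.0,35.0,5.0,45.0
out = ./prepdata_out
dem = dem.vrt
acc = acc.vrt
dir = dir.vrt
nproc = 4
thresh = 5000
streamnet = yes
""")
te = [float(x) for x in config.get('prepdata', 'te').split(',')]
print(te)  # [-10.0, 35.0, 5.0, 45.0]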
Example #14
def main():

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-c',
                        '--config',
                        dest="config",
                        help="Configuration file")
    parser.add_argument('--check',
                        dest="check",
                        help="Check bank property file",
                        action="store_true",
                        default=False)
    parser.add_argument('-u',
                        '--update',
                        dest="update",
                        help="Update action",
                        action="store_true",
                        default=False)
    parser.add_argument('--fromscratch',
                        dest="fromscratch",
                        help="Force a new cycle update",
                        action="store_true",
                        default=False)
    parser.add_argument('-z',
                        '--from-scratch',
                        dest="fromscratch",
                        help="Force a new cycle update",
                        action="store_true",
                        default=False)
    parser.add_argument('-p',
                        '--publish',
                        dest="publish",
                        help="Publish",
                        action="store_true",
                        default=False)
    parser.add_argument('--unpublish',
                        dest="unpublish",
                        help="Unpublish",
                        action="store_true",
                        default=False)

    parser.add_argument('--release',
                        dest="release",
                        help="release of the bank")
    parser.add_argument(
        '--from-task',
        dest="from_task",
        help="Start cycle at a specific task (init always executed)")
    parser.add_argument(
        '--process',
        dest="process",
        help=
        "Linked to from-task, optionally specify a block, meta or process name to start from"
    )
    parser.add_argument('-l', '--log', dest="log", help="log level")
    parser.add_argument('-r',
                        '--remove',
                        dest="remove",
                        help="Remove a bank release",
                        action="store_true",
                        default=False)
    parser.add_argument('--remove-all',
                        dest="removeall",
                        help="Remove all bank releases and database records",
                        action="store_true",
                        default=False)
    parser.add_argument('--remove-pending',
                        dest="removepending",
                        help="Remove pending release",
                        action="store_true",
                        default=False)
    parser.add_argument('-s',
                        '--status',
                        dest="status",
                        help="Get status",
                        action="store_true",
                        default=False)
    parser.add_argument('-b', '--bank', dest="bank", help="bank name")
    parser.add_argument('--owner',
                        dest="owner",
                        help="change owner of the bank")
    parser.add_argument('--stop-before',
                        dest="stop_before",
                        help="Store workflow before task")
    parser.add_argument('--stop-after',
                        dest="stop_after",
                        help="Store workflow after task")
    parser.add_argument('--freeze',
                        dest="freeze",
                        help="Freeze a bank release",
                        action="store_true",
                        default=False)
    parser.add_argument('--unfreeze',
                        dest="unfreeze",
                        help="Unfreeze a bank release",
                        action="store_true",
                        default=False)
    parser.add_argument('-f',
                        '--force',
                        dest="force",
                        help="Force action",
                        action="store_true",
                        default=False)
    parser.add_argument('-h',
                        '--help',
                        dest="help",
                        help="Show usage",
                        action="store_true",
                        default=False)

    parser.add_argument('--search',
                        dest="search",
                        help="Search by format and types",
                        action="store_true",
                        default=False)
    parser.add_argument('--formats',
                        dest="formats",
                        help="List of formats to search, comma separated")
    parser.add_argument('--types',
                        dest="types",
                        help="List of types to search, comma separated")
    parser.add_argument('--query',
                        dest="query",
                        help="Lucene query syntax to search in index")

    parser.add_argument('--show',
                        dest="show",
                        help="Show format files for selected bank",
                        action="store_true",
                        default=False)

    parser.add_argument('-n',
                        '--change-dbname',
                        dest="newbank",
                        help="Change old bank name to this new bank name")
    parser.add_argument(
        '-e',
        '--move-production-directories',
        dest="newdir",
        help=
        "Change bank production directories location to this new path, path must exist"
    )
    parser.add_argument('--visibility',
                        dest="visibility",
                        help="visibility status of the bank")

    parser.add_argument('--maintenance',
                        dest="maintenance",
                        help="Maintenance mode (on/off/status)")

    parser.add_argument('--version',
                        dest="version",
                        help="Show version",
                        action="store_true",
                        default=False)
    parser.add_argument('--status-ko',
                        dest="statusko",
                        help="Get bank in KO status",
                        action="store_true",
                        default=False)

    options = Options()
    parser.parse_args(namespace=options)

    options.no_log = False

    if options.help:
        print('''
    --config: global.properties file path

    --status: list of banks with published release
        [OPTIONAL]
        --bank xx / bank: Get status details of bank

    --status-ko: list of banks in error status (last run)

    --log DEBUG|INFO|WARN|ERR  [OPTIONAL]: set log level in logs for this run, default is set in global.properties file

    --check: Check bank property file
        [MANDATORY]
        --bank xx: name of the bank to check (will check xx.properties)

    --owner yy: Change owner of the bank (user id)
        [MANDATORY]
        --bank xx: name of the bank

    --visibility public|private: change visibility public/private of a bank
        [MANDATORY]
        --bank xx: name of the bank

    --change-dbname yy: Change name of the bank to this new name
        [MANDATORY]
        --bank xx: current name of the bank

    --move-production-directories yy: Change bank production directories location to this new path, path must exist
        [MANDATORY]
        --bank xx: current name of the bank

    --update: Update bank
        [MANDATORY]
        --bank xx: name of the bank(s) to update, comma separated
        [OPTIONAL]
        --publish: after update set as *current* version
        --from-scratch: force a new update cycle, even if release is identical, release will be incremented like (myrel_1)
        --stop-before xx: stop update cycle before the start of step xx
        --stop-after xx: stop update cycle after step xx has completed
        --from-task xx --release yy: Force a re-update cycle for bank release *yy*, or from the current cycle (in production directories), skipping steps up to *xx*
        --process xx: linked to from-task, optionally specify a block, meta or process name to start from
        --release xx: release to update

    --publish: Publish bank as current release to use
        [MANDATORY]
        --bank xx: name of the bank to update
        --release xx: release of the bank to publish
    --unpublish: Unpublish bank (remove current)
        [MANDATORY]
        --bank xx: name of the bank to update

    --remove-all: Remove all bank releases and database records
        [MANDATORY]
        --bank xx: name of the bank to update
        [OPTIONAL]
        --force: remove frozen releases

    --remove-pending: Remove pending releases
        [MANDATORY]
        --bank xx: name of the bank to update

    --remove: Remove bank release (files and database release)
        [MANDATORY]
        --bank xx: name of the bank to update
        --release xx: release of the bank to remove

        Release must not be the *current* version. If it is, publish another release first.

    --freeze: Freeze bank release (cannot be removed)
        [MANDATORY]
        --bank xx: name of the bank to update
        --release xx: release of the bank to freeze

    --unfreeze: Unfreeze bank release (can be removed)
        [MANDATORY]
        --bank xx: name of the bank to update
        --release xx: release of the bank to unfreeze

    --search: basic search in bank production releases, returns a list of banks
        --formats xx,yy: list of comma-separated formats
        AND/OR
        --types xx,yy: list of comma-separated types

        --query "LUCENE query syntax": search in index (if activated)

    --show: Show bank files per format
      [MANDATORY]
      --bank xx: name of the bank to show
      [OPTIONAL]
      --release xx: release of the bank to show

    --maintenance on/off/status: (un)set biomaj in maintenance mode to prevent updates/removal

        ''')
        return

    if options.version:
        version = pkg_resources.require('biomaj')[0].version
        print('Version: ' + str(version))
        return

    if options.stop_after or options.stop_before or options.from_task:
        available_steps = []
        for flow in UpdateWorkflow.FLOW:
            available_steps.append(flow['name'])
        for flow in RemoveWorkflow.FLOW:
            available_steps.append(flow['name'])
        if options.stop_after:
            if options.stop_after not in available_steps:
                print('Invalid step: ' + options.stop_after)
                sys.exit(1)
        if options.stop_before:
            if options.stop_before not in available_steps:
                print('Invalid step: ' + options.stop_before)
                sys.exit(1)
        if options.from_task:
            if options.from_task not in available_steps:
                print('Invalid step: ' + options.from_task)
                sys.exit(1)

    bmaj = None
    try:
        if options.config is not None:
            BiomajConfig.load_config(options.config)
        else:
            BiomajConfig.load_config()
    except Exception as e:
        print(str(e))
        sys.exit(1)

    try:

        if options.maintenance:
            if options.maintenance not in ['on', 'off', 'status']:
                print("Wrong maintenance value [on,off,status]")
                sys.exit(1)
            data_dir = BiomajConfig.global_config.get('GENERAL', 'data.dir')
            if BiomajConfig.global_config.has_option('GENERAL', 'lock.dir'):
                lock_dir = BiomajConfig.global_config.get(
                    'GENERAL', 'lock.dir')
            else:
                lock_dir = data_dir
            maintenance_lock_file = os.path.join(lock_dir, 'biomaj.lock')
            if options.maintenance == 'status':
                if os.path.exists(maintenance_lock_file):
                    print("Maintenance: On")
                else:
                    print("Maintenance: Off")
                sys.exit(0)
            if options.maintenance == 'on':
                f = open(maintenance_lock_file, 'w')
                f.write('1')
                f.close()
                print("Maintenance set to On")
                sys.exit(0)
            if options.maintenance == 'off':
                if os.path.exists(maintenance_lock_file):
                    os.remove(maintenance_lock_file)
                print("Maintenance set to Off")
                sys.exit(0)

        if options.owner:
            if not options.bank:
                print("Bank option is missing")
                sys.exit(1)
            bank = Bank(options.bank, no_log=True)
            bank.set_owner(options.owner)
            sys.exit(0)

        if options.visibility:
            if not options.bank:
                print("Bank option is missing")
                sys.exit(1)
            if options.visibility not in ['public', 'private']:
                print("Valid values are public|private")
                sys.exit(1)
            bank = Bank(options.bank, no_log=True)
            bank.set_visibility(options.visibility)
            print(
                "Do not forget to update the visibility.default parameter in the configuration file accordingly"
            )
            sys.exit(0)

        if options.newdir:
            if not options.bank:
                print("Bank option is missing")
                sys.exit(1)
            if not os.path.exists(options.newdir):
                print("Destination directory does not exist")
                sys.exit(1)
            bank = Bank(options.bank, options=options, no_log=True)
            if not bank.bank['production']:
                print("Nothing to move, no production directory")
                sys.exit(0)
            bank.load_session(Workflow.FLOW, None)
            w = Workflow(bank)
            res = w.wf_init()
            if not res:
                sys.exit(1)
            for prod in bank.bank['production']:
                session = bank.get_session_from_release(prod['release'])
                bank.load_session(Workflow.FLOW, session)
                prod_path = bank.session.get_full_release_directory()
                if os.path.exists(prod_path):
                    shutil.move(prod_path, options.newdir)
                prod['data_dir'] = options.newdir
            bank.banks.update(
                {'name': options.bank},
                {'$set': {
                    'production': bank.bank['production']
                }})
            print("Bank production directories moved to " + options.newdir)
            print(
                "WARNING: do not forget to update the data.dir and dir.version properties accordingly"
            )
            w.wf_over()
            sys.exit(0)

        if options.newbank:
            if not options.bank:
                print("Bank option is missing")
                sys.exit(1)
            bank = Bank(options.bank, no_log=True)
            conf_dir = BiomajConfig.global_config.get('GENERAL', 'conf.dir')
            bank_prop_file = os.path.join(conf_dir,
                                          options.bank + '.properties')
            config_bank = configparser.SafeConfigParser()
            config_bank.read(
                [os.path.join(conf_dir, options.bank + '.properties')])
            config_bank.set('GENERAL', 'db.name', options.newbank)
            newbank_prop_file = open(
                os.path.join(conf_dir, options.newbank + '.properties'), 'w')
            config_bank.write(newbank_prop_file)
            newbank_prop_file.close()
            bank.banks.update({'name': options.bank},
                              {'$set': {
                                  'name': options.newbank
                              }})
            os.remove(bank_prop_file)
            print("Bank " + options.bank + " renamed to " + options.newbank)
            sys.exit(0)

        if options.search:
            if options.query:
                res = Bank.searchindex(options.query)
                print("Query matches for :" + options.query)
                results = [["Release", "Format(s)", "Type(s)", "Files"]]
                for match in res:
                    results.append([
                        match['_source']['release'],
                        str(match['_source']['format']),
                        str(match['_source']['types']),
                        ','.join(match['_source']['files'])
                    ])
                print(tabulate(results, headers="firstrow", tablefmt="grid"))
            else:
                formats = []
                if options.formats:
                    formats = options.formats.split(',')
                types = []
                if options.types:
                    types = options.types.split(',')
                print("Search by formats=" + str(formats) + ", types=" +
                      str(types))
                res = Bank.search(formats, types, False)
                results = [[
                    "Name", "Release", "Format(s)", "Type(s)", 'Current'
                ]]
                for bank in sorted(res, key=lambda bank: (bank['name'])):
                    b = bank['name']
                    bank['production'].sort(key=lambda n: n['release'],
                                            reverse=True)
                    for prod in bank['production']:
                        iscurrent = ""
                        if prod['session'] == bank['current']:
                            iscurrent = "yes"
                        results.append([
                            b if b else '', prod['release'],
                            ','.join(prod['formats']), ','.join(prod['types']),
                            iscurrent
                        ])
                        b = None
                print(tabulate(results, headers="firstrow", tablefmt="grid"))
                sys.exit(0)

        if options.show:
            if not options.bank:
                print("Bank option is required")
                sys.exit(1)

            bank = Bank(options.bank, no_log=True)
            results = [[
                "Name", "Release", "Format(s)", "Type(s)", "Tag(s)", "File(s)"
            ]]
            current = None
            fformat = None
            if 'current' in bank.bank and bank.bank['current']:
                current = bank.bank['current']
            for prod in bank.bank['production']:
                include = True
                release = prod['release']
                if current == prod['session']:
                    release += ' (current)'
                if options.release and (prod['release'] != options.release and
                                        prod['prod_dir'] != options.release):
                    include = False
                if include:
                    session = bank.get_session_from_release(prod['release'])
                    formats = session['formats']
                    afiles = []
                    atags = []
                    atypes = []
                    for fformat in list(formats.keys()):
                        for elt in formats[fformat]:
                            atypes.append(','.join(elt['types']))
                            for tag in list(elt['tags'].keys()):
                                atags.append(elt['tags'][tag])
                            for eltfile in elt['files']:
                                afiles.append(eltfile)
                    results.append([
                        bank.bank['name'], release, fformat, ','.join(atypes),
                        ','.join(atags), ','.join(afiles)
                    ])
            print(tabulate(results, headers="firstrow", tablefmt="grid"))
            sys.exit(0)

        if options.check:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            bank = Bank(options.bank, no_log=True)
            print(options.bank + " check: " + str(bank.check()) + "\n")
            sys.exit(0)

        if options.status:
            if options.bank:
                bank = Bank(options.bank, no_log=True)
                info = bank.get_bank_release_info(full=True)
                print(
                    tabulate(info['info'], headers='firstrow',
                             tablefmt='psql'))
                print(
                    tabulate(info['prod'], headers='firstrow',
                             tablefmt='psql'))
                # do we have some pending release(s)
                if 'pend' in info and len(info['pend']) > 1:
                    print(
                        tabulate(info['pend'],
                                 headers='firstrow',
                                 tablefmt='psql'))
            else:
                banks = Bank.list()
                # Headers of output table
                banks_list = [["Name", "Type(s)", "Release", "Visibility"]]
                for bank in sorted(banks, key=lambda k: k['name']):
                    bank = Bank(bank['name'], no_log=True)
                    banks_list.append(bank.get_bank_release_info()['info'])
                print(tabulate(banks_list, headers="firstrow",
                               tablefmt="psql"))
            sys.exit(0)

        if options.statusko:
            banks = Bank.list()
            banks_list = [["Name", "Type(s)", "Release", "Visibility"]]
            for bank in sorted(banks, key=lambda k: k['name']):
                try:
                    bank = Bank(bank['name'], no_log=True)
                    bank.load_session(UpdateWorkflow.FLOW)
                    if bank.session is not None:
                        if bank.use_last_session and not bank.session.get_status(
                                Workflow.FLOW_OVER):
                            wf_status = bank.session.get('workflow_status')
                            if wf_status is None or not wf_status:
                                banks_list.append(
                                    bank.get_bank_release_info()['info'])
                except Exception as e:
                    print(str(e))
            print(tabulate(banks_list, headers="firstrow", tablefmt="psql"))

        if options.update:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            banks = options.bank.split(',')
            gres = True
            for bank in banks:
                options.bank = bank
                bmaj = Bank(bank, options)
                print('Log file: ' + bmaj.config.log_file)
                check_status = bmaj.check()
                if not check_status:
                    print('Skip bank ' + options.bank + ': wrong config')
                    gres = False
                    continue
                res = bmaj.update(depends=True)
                if not res:
                    gres = False
                Notify.notifyBankAction(bmaj)
            if not gres:
                sys.exit(1)

        if options.freeze:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            if not options.release:
                print("Bank release is missing")
                sys.exit(1)
            bmaj = Bank(options.bank, options)
            res = bmaj.freeze(options.release)
            if not res:
                sys.exit(1)

        if options.unfreeze:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            if not options.release:
                print("Bank release is missing")
                sys.exit(1)
            bmaj = Bank(options.bank, options)
            res = bmaj.unfreeze(options.release)
            if not res:
                sys.exit(1)

        if options.remove or options.removeall:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            if options.remove and not options.release:
                print("Bank release is missing")
                sys.exit(1)
            if options.removeall:
                bmaj = Bank(options.bank, options, no_log=True)
                print('Log file: ' + bmaj.config.log_file)
                res = bmaj.removeAll(options.force)
            else:
                bmaj = Bank(options.bank, options)
                print('Log file: ' + bmaj.config.log_file)
                res = bmaj.remove(options.release)
                Notify.notifyBankAction(bmaj)
            if not res:
                sys.exit(1)

        if options.removepending:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            bmaj = Bank(options.bank, options, no_log=True)
            print('Log file: ' + bmaj.config.log_file)
            res = bmaj.remove_pending(options.release)
            if not res:
                sys.exit(1)

        if options.unpublish:
            if not options.bank:
                print("Bank name is missing")
                sys.exit(1)
            bmaj = Bank(options.bank, options, no_log=True)
            bmaj.load_session()
            bmaj.unpublish()
            sys.exit(0)

        if options.publish:
            if not options.bank:
                print("Bank name or release is missing")
                sys.exit(1)
            bmaj = Bank(options.bank, options, no_log=True)
            print('Log file: ' + bmaj.config.log_file)
            bmaj.load_session()
            bank = bmaj.bank
            session = None
            if options.get_option('release') is None:
                # Get latest prod release
                if len(bank['production']) > 0:
                    prod = bank['production'][len(bank['production']) - 1]
                    for s in bank['sessions']:
                        if s['id'] == prod['session']:
                            session = s
                            break
            else:
                # Search production release matching release
                for prod in bank['production']:
                    if prod['release'] == options.release or prod[
                            'prod_dir'] == options.release:
                        # Search session related to this production release
                        for s in bank['sessions']:
                            if s['id'] == prod['session']:
                                session = s
                                break
                        break
            if session is None:
                print("No production session could be found for this release")
                sys.exit(1)
            bmaj.session._session = session
            bmaj.publish()
    except Exception as e:
        print(str(e))
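The parse_args(namespace=options) call near the top of main() fills attributes on a pre-built Options object instead of a fresh argparse.Namespace, which is what makes later calls like options.get_option('release') possible. A minimal, self-contained sketch of that pattern; the Options class and the sample arguments here are hypothetical stand-ins:

import argparse

class Options(object):
    # hypothetical stand-in for the Options class used above
    def get_option(self, name):
        return getattr(self, name, None)

parser = argparse.ArgumentParser()
parser.add_argument('--bank')
parser.add_argument('--release')

options = Options()
parser.parse_args(['--bank', 'alu'], namespace=options)
print(options.bank)                   # 'alu'
print(options.get_option('release'))  # None when not supplied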
Example #15
0
def multiscan(Files,
              recursive=False,
              configregen=False,
              configfile=CONFIG,
              config=None,
              module_list=None):
    """
    The meat and potatoes. Returns the list of module results

    Files - A list of files and dirs to be scanned
    recursive - If true it will search the dirs in Files recursively
    configregen - If True a new config file will be created overwriting the old
    configfile - What config file to use. Can be None.
    config - A dictionary containing the configuration options to be used.
    """
    # Redirect stdout to stderr
    stdout = sys.stdout
    sys.stdout = sys.stderr
    # TODO: Make sure the cleanup from this works if something breaks

    # Init some vars
    # If recursive is None we don't parse the file list and take it as is.
    if recursive is not None:
        filelist = parseFileList(Files, recursive=recursive)
    else:
        filelist = Files
    # A list of files in the module dir
    if module_list is None:
        module_list = parseDir(MODULEDIR, recursive=True)
    # A dictionary used for the copyfileto parameter
    filedic = {}
    # What will be the config file object
    config_object = None

    # Read in config
    if configfile:
        config_object = configparser.SafeConfigParser()
        config_object.optionxform = str
        # Regen the config if needed or wanted
        if configregen or not os.path.isfile(configfile):
            _rewrite_config(module_list, config_object, filepath=configfile)

        config_object.read(configfile)
        main_config = _get_main_config(config_object, filepath=configfile)
        if config:
            file_conf = parse_config(config_object)
            for key in config:
                if key not in file_conf:
                    file_conf[key] = config[key]
                    file_conf[key]['_load_default'] = True
                else:
                    file_conf[key].update(config[key])
            config = file_conf
        else:
            config = parse_config(config_object)
    else:
        if config is None:
            config = {}
        else:
            config['_load_default'] = True
        if 'main' in config:
            main_config = config['main']
        else:
            main_config = DEFAULTCONF

    # If none of the files existed
    if not filelist:
        sys.stdout = stdout
        raise ValueError("No valid files")

    # Copy files to a share if configured
    if "copyfilesto" not in main_config:
        main_config["copyfilesto"] = False
    if main_config["copyfilesto"]:
        if os.path.isdir(main_config["copyfilesto"]):
            filelist = _copy_to_share(filelist, filedic,
                                      main_config["copyfilesto"])
        else:
            sys.stdout = stdout
            raise IOError('The copyfilesto dir "' +
                          main_config["copyfilesto"] +
                          '" is not a valid dir')

    # Create the global module interface
    global_module_interface = _GlobalModuleInterface()

    # Start a thread for each module
    thread_list = _start_module_threads(filelist, module_list, config,
                                        global_module_interface)

    # Write the default configure settings for missing ones
    if config_object:
        _write_missing_module_configs(module_list,
                                      config_object,
                                      filepath=configfile)

    # Warn about spaces in file names
    for f in filelist:
        if ' ' in f:
            print(
                'WARNING: You are using file paths with spaces. This may result in modules not reporting correctly.'
            )
            break

    # Wait for all threads to finish
    thread_wait_list = thread_list[:]
    i = 0
    while thread_wait_list:
        i += 1
        for thread in thread_wait_list:
            if not thread.is_alive():
                i = 0
                thread_wait_list.remove(thread)
                if VERBOSE:
                    print(thread.name, "took",
                          thread.endtime - thread.starttime)
        if i == 15:
            i = 0
            if VERBOSE:
                p = 'Waiting on'
                for thread in thread_wait_list:
                    p += ' ' + thread.name
                p += '...'
                print(p)
        time.sleep(1)

    # Delete copied files
    if main_config["copyfilesto"]:
        for item in filelist:
            try:
                os.remove(item)
            except OSError:
                pass

    # Get Result list
    results = []
    for thread in thread_list:
        if thread.ret is not None:
            results.append(thread.ret)
        del thread

    # Translates file names back to the originals
    if filedic:
        # I have no idea if this is the best way to do in-place modifications
        for i in range(0, len(results)):
            (result, metadata) = results[i]
            modded = False
            for j in range(0, len(result)):
                (filename, hit) = result[j]
                base = basename(filename)
                if base in filedic:
                    filename = filedic[base]
                    modded = True
                    result[j] = (filename, hit)
            if modded:
                results[i] = (result, metadata)

    # Scan subfiles if needed
    subscan_list = global_module_interface._get_subscan_list()
    if subscan_list:
        # Translate from_filename back to original if needed
        if filedic:
            for i in range(0, len(subscan_list)):
                file_path, from_filename, module_name = subscan_list[i]
                base = basename(from_filename)
                if base in filedic:
                    from_filename = filedic[base]
                    subscan_list[i] = (file_path, from_filename, module_name)

        results.extend(
            _subscan(subscan_list, config, main_config, module_list,
                     global_module_interface))

    global_module_interface._cleanup()

    # Return stdout to previous state
    sys.stdout = stdout
    return results
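multiscan() sets config_object.optionxform = str before reading; without that, configparser lowercases every option name, which would break case-sensitive module settings. A small self-contained demonstration, using the Python 3 ConfigParser name and a made-up option:

import configparser

demo = "[main]\nCopyFilesTo = /tmp/share\n"

lowered = configparser.ConfigParser()
lowered.read_string(demo)
print(list(lowered['main']))     # ['copyfilesto'] -- keys lowercased by default

preserved = configparser.ConfigParser()
preserved.optionxform = str      # keep option names exactly as written
preserved.read_string(demo)
print(list(preserved['main']))   # ['CopyFilesTo']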
Example #16
0
def uploadAnalysis(*analysis):
    """
    Process the requests.

    An analysis consists of these parts
    1) Preprocessing function
    2) Differential private mechanisms (here in the form a matrix)
    3) Post processing function
    """
    try:
        # reqId = a short unique identifier for this request
        reqId = uuid.uuid1().hex[:3]  # TODO: FIX, use all bytes
        os.makedirs(libdp.wd(reqId, "private"))
        os.makedirs(libdp.wd(reqId, "public"))
        request.files['preprocProg'].save(libdp.wd(reqId, "public")
                                          + config.preProc)
        request.files['postprocProg'].save(libdp.wd(reqId, "public")
                                           + config.postProc)
        counter = 0
        numoutputs = int(request.values.get("numoutputs"))
        for counter in range(0, numoutputs):
            analyseType = request.values.get("type[" + str(counter) + "]")
            if analyseType == 'analysesMatrix':
                f = open(libdp.wd(reqId, "public")
                         + str(counter) + '.analysesMatrix', "w")
                libdp.log("Analyses Matrix",
                          data=request.values.get(
                                "analysesData[" + str(counter) + "]"))
                f.write(
                    request.values.get("analysesData[" + str(counter) + "]"))
                f.close()
                counter += 1
            elif analyseType == 'analysesLaplace':
                f = open(libdp.wd(reqId, "public")
                         + str(counter) + '.analysesLaplace', "w")
                libdp.log("Analyses Laplace",
                          data=(counter, request.values.get(
                                "analysesData[" + str(counter) + "]")))
                f.write(request.values.get(
                    "analysesData[" + str(counter) + "]"))
                f.close()
                counter += 1
            else:
                libdp.log("Analysis Execution", msg="Unknown Analysis")
                raise Exception('Analysis Execution', 'Unknown Analysis')

        parser = configparser.SafeConfigParser()
        parser.add_section('config')
        parser.set('config', 'preProcParameters', request.values.get("preparameters"))
        parser.set('config', 'preResources', request.values.get("preresource"))

        parser.set('config', 'numOutput', str(counter))

        parser.set('config', 'postProcParameters', request.values.get("postparameters"))
        parser.set('config', 'postResources', request.values.get("postresource"))
        parser.write(
            open(libdp.wd(reqId, "public") + config.reqInfo, "w"))
    except Exception:
        traceback.print_exc()

    libdp.log("Running sandbox")
    threadx = threading.Thread(target=userProcedure, args=(reqId,))
    threadx.start()
    return str(reqId)
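uploadAnalysis() hands parser.write() a bare open() whose handle is never explicitly closed, and parser.set() raises TypeError when request.values.get() returns None. A minimal sketch of the same write with both issues addressed; the 'request.ini' path and the values are placeholders:

import configparser

parser = configparser.ConfigParser()
parser.add_section('config')
# set() only accepts strings; guard against a missing form value (None)
pre_params = None
parser.set('config', 'preProcParameters', pre_params or '')
parser.set('config', 'numOutput', str(2))

# the with-block flushes and closes the handle deterministically
with open('request.ini', 'w') as fh:
    parser.write(fh)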
Example #17
0
def options(optlist, option_list):

    # option_list format is: section, name, type, default
    # section names are used in the output dict only if names are not unique

    odict = {}
    config = configparser.SafeConfigParser()
    filenames = []
    cmdlineset = []
    if isinstance(optlist, str):
        optlist = [optlist]

    for o in optlist:
        if o[:2] == '--':
            optstring = o[2:]
            result = re.match(r'(\w*)-(\w*)\s*=\s*(.*)', optstring)
            if result is None:
                print('Cannot parse option', optstring)
            else:
                cmdlineset.append(result.groups())
        else:
            filenames.append(o)

    for f in filenames:
        if not os.path.isfile(f):
            print('Config file', f, 'does not exist!')
            sys.exit(1)

    config.read(filenames)
    for c in cmdlineset:
        for o in option_list:
            (section, name, otype, default) = o[:4]
            if c[0] == section and c[1] == name:
                break
        else:
            print('Option %s-%s does not exist!' % (c[0], c[1]))
            sys.exit(2)
        try:
            config.add_section(c[0])
        except configparser.DuplicateSectionError:
            pass
        config.set(c[0], c[1], c[2])
    cased = {
        int: config.getint,
        float: config.getfloat,
        bool: config.getboolean,
        str: config.get,
        list: lambda x, y: eval(config.get(x, y))
    }
    for o in option_list:
        (section, name, otype, default) = o[:4]
        # if this name is duplicated in another section, we need to know
        count = 0
        for o2 in option_list:
            if o2[1] == name: count += 1
        # get result
        try:
            result = cased[otype](section, name)
        except (configparser.NoSectionError, configparser.NoOptionError):
            result = default
        # python2 compatibility, force back to str
        try:
            if isinstance(result, unicode):
                result = result.encode("utf-8")
        except NameError:  # unicode type doesn't exist, we are in py3
            pass
        if otype is str and result == "None":
            result = None
        if count > 1:
            odict[section + '_' + name] = result
        else:
            odict[name] = result
    return odict
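The cased dispatch in options() maps a Python type to the matching typed getter; note that its list branch goes through eval(), which executes arbitrary expressions from the config file. A self-contained rerun of the dispatch with ast.literal_eval as a safer stand-in for that branch (section and option names are made up):

import ast
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[control]\nnchan = 8\ngain = 1.5\nverbose = yes\nbands = [1, 2, 3]\n")

cased = {
    int: cfg.getint,
    float: cfg.getfloat,
    bool: cfg.getboolean,
    str: cfg.get,
    # literal_eval only accepts Python literals, unlike eval()
    list: lambda s, o: ast.literal_eval(cfg.get(s, o)),
}
print(cased[int]('control', 'nchan'))     # 8
print(cased[bool]('control', 'verbose'))  # True
print(cased[list]('control', 'bands'))    # [1, 2, 3]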
Example #18
0
    def _parse_args(self, args_str):
        '''
        Eg. python provision_sub_cluster.py --sub_cluster_name foo
                                        --api_server_ip 127.0.0.1
                                        --api_server_port 8082
                                        --api_server_use_ssl False
                                        --sub_cluster_asn 1-65535
                                        --oper <add | del>
                                        [--sub_cluster_id 1-4294967295]
        '''

        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c",
                                 "--conf_file",
                                 help="Specify config file",
                                 metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'api_server_use_ssl': False,
            'oper': 'add',
            'sub_cluster_asn': '64513',
        }
        ksopts = {
            'admin_user': '******',
            'admin_password': '******',
            'admin_tenant_name': 'default-domain'
        }

        if args.conf_file:
            config = configparser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            if 'KEYSTONE' in config.sections():
                ksopts.update(dict(config.items("KEYSTONE")))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        defaults.update(ksopts)
        parser.set_defaults(**defaults)

        parser.add_argument("--sub_cluster_name",
                            help="name of sub cluster",
                            required=True)
        parser.add_argument("--api_server_ip",
                            help="IP address of api server",
                            required=True)
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument("--api_server_use_ssl",
                            help="Use SSL to connect with API server")
        parser.add_argument("--oper",
                            default='add',
                            help="Provision operation to be done(add or del)")
        parser.add_argument("--admin_user", help="Name of keystone admin user")
        parser.add_argument("--admin_password",
                            help="Password of keystone admin user")
        parser.add_argument("--admin_tenant_name",
                            help="Tenant name for keystone admin user")
        parser.add_argument("--openstack_ip",
                            help="Openstack IP for authentication")
        parser.add_argument("--sub_cluster_asn", help="Sub cluster's ASN")
        parser.add_argument(
            "--sub_cluster_id",
            help=("Sub cluster's ID between 1-4294967295 depending of the "
                  "size of the global ASN. If not provided, an ID is "
                  "automatically allocated"))
        self._args = parser.parse_args(remaining_argv)
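_parse_args() layers configuration in three steps: hard-coded defaults, values read from an ini file, then CLI flags, wired together through parser.set_defaults(). A stripped-down sketch of that precedence; the section name and sample options are made up:

import argparse
import configparser

config = configparser.ConfigParser()
config.read_string("[DEFAULTS]\napi_server_port = 9100\n")

defaults = {'api_server_ip': '127.0.0.1', 'api_server_port': '8082'}
defaults.update(dict(config.items('DEFAULTS')))   # ini overrides hard-coded

parser = argparse.ArgumentParser()
parser.set_defaults(**defaults)
parser.add_argument('--api_server_ip')
parser.add_argument('--api_server_port')

print(parser.parse_args([]).api_server_port)      # '9100' (from the ini)
print(parser.parse_args(['--api_server_port', '8082']).api_server_port)  # CLI wins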
Example #19
0
    def read_settings(self):
        ''' Reads the settings from the alicloud.ini file '''

        config = configparser.SafeConfigParser()

        ecs_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'alicloud.ini')
        ecs_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('ALICLOUD_INI_PATH', ecs_default_ini_path)))
        config.read(ecs_ini_path)

        access_key = os.environ.get('ALICLOUD_ACCESS_KEY', os.environ.get('ALICLOUD_ACCESS_KEY_ID', None))
        if not access_key:
            access_key = self.get_option(config, 'credentials', 'alicloud_access_key')

        secret_key = os.environ.get('ALICLOUD_SECRET_KEY', os.environ.get('ALICLOUD_SECRET_ACCESS_KEY', None))
        if not secret_key:
            secret_key = self.get_option(config, 'credentials', 'alicloud_secret_key')

        security_token = os.environ.get('ALICLOUD_SECURITY_TOKEN', None)
        if not security_token:
            security_token = self.get_option(config, 'credentials', 'alicloud_security_token')

        self.credentials = {
            'acs_access_key_id': access_key,
            'acs_secret_access_key': secret_key,
            'security_token': security_token,
        }

        # Regions
        config_regions = self.get_option(config, 'ecs', 'regions')
        if not config_regions or config_regions == 'all':
            all_regions = self.connect_to_ecs(footmark.ecs, "cn-beijing").get_all_regions()

            exclude_regions = []
            if self.get_option(config, 'ecs', 'regions_exclude'):
                exclude_regions = [ex.strip() for ex in self.get_option(config, 'ecs', 'regions_exclude').split(',') if ex.strip()]

            for region in all_regions:
                if exclude_regions and region.id in exclude_regions:
                    continue
                self.regions.append(region.id)
        else:
            self.regions = config_regions.split(",")

        # Destination addresses
        self.destination_variable = self.get_option(config, 'ecs', 'destination_variable', "")

        self.hostname_variable = self.get_option(config, 'ecs', 'hostname_variable', "")

        self.destination_format = self.get_option(config, 'ecs', 'destination_format', "")
        self.destination_format_tags = self.get_option(config, 'ecs', 'destination_format_tags', "")

        # Instance states to be gathered in inventory. Default is 'running'.
        ecs_valid_instance_states = ['pending', 'running', 'starting', 'stopping', 'stopped']

        if self.get_option(config, 'ecs', 'all_instances'):
            self.ecs_instance_states.extend(ecs_valid_instance_states)
        elif self.get_option(config, 'ecs', 'instance_states'):
            for instance_state in self.get_option(config, 'ecs', 'instance_states').split(","):
                instance_state = instance_state.strip()
                if instance_state not in ecs_valid_instance_states:
                    continue
                self.ecs_instance_states.append(instance_state)
        else:
            self.ecs_instance_states.append('running')

        # Cache related
        cache_dir = os.path.expanduser(self.get_option(config, 'ecs', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        cache_name = 'ansible-alicloud'
        self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
        self.cache_path_index = cache_dir + "/%s.index" % cache_name
        self.cache_max_age = float(self.get_option(config, 'ecs', 'cache_max_age'))

        self.expand_csv_tags = self.get_option(config, 'ecs', 'expand_csv_tags')

        # Configure nested groups instead of flat namespace.
        self.nested_groups = self.get_option(config, 'ecs', 'nested_groups')

        # Configure which groups should be created.
        group_by_options = [
            'group_by_instance_id',
            'group_by_region',
            'group_by_availability_zone',
            'group_by_instance_type',
            'group_by_image_id',
            'group_by_vpc_id',
            'group_by_vswitch_id',
            'group_by_security_group',
            'group_by_tag_keys',
            'group_by_tag_none'
        ]
        for option in group_by_options:
            setattr(self, option, self.get_option(config, 'ecs', option))

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = self.get_option(config, 'ecs', 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
        except configparser.NoOptionError:
            raise

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = self.get_option(config, 'ecs', 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
        except configparser.NoOptionError:
            raise

        instance_filters = self.get_option(config, 'ecs', 'instance_filters')
        if instance_filters and len(instance_filters) > 0:
            tags = {}
            for field in instance_filters.split(','):
                field = field.strip()
                if not field or '=' not in field:
                    continue
                key, value = [x.strip() for x in field.split('=', 1)]
                if not key:
                    continue
                elif key.startswith("tag:"):
                    tags[key[4:]] = value
                    continue
                elif key in ['page_size', 'page_number']:
                    try:
                        if value and int(value):
                            value = int(value)
                    except Exception:
                        raise
                self.ecs_instance_filters[key] = value
            if tags:
                self.ecs_instance_filters['tags'] = tags
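The instance_filters parsing at the end of read_settings() splits 'key=value' pairs, routes 'tag:'-prefixed keys into a separate tags dict, and coerces the paging options to int. A stripped-down rerun of that logic with made-up sample input:

filters = "page_size=50, tag:Env=prod, name=web"

tags, plain = {}, {}
for field in filters.split(','):
    field = field.strip()
    if not field or '=' not in field:
        continue
    key, value = [x.strip() for x in field.split('=', 1)]
    if key.startswith('tag:'):
        tags[key[4:]] = value          # tag:Env=prod -> {'Env': 'prod'}
    elif key in ('page_size', 'page_number'):
        plain[key] = int(value)        # numeric paging options
    else:
        plain[key] = value
print(tags)    # {'Env': 'prod'}
print(plain)   # {'page_size': 50, 'name': 'web'}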
Example #20
0
    def parse_input(self, ConfigFile):
        """
        Extract the parameters from the section [run] of the ConfigFile.
        
        Parameters
        ----------
        ConfigFile : str
            Loaded configuration file.
        
        Returns
        -------
        input_file : str
            Path of the input file.
        output_file : str
            Path of the output file.
        output_dir : str
            Path of the output directory.
        instrument : str
            Keyword with the name of the instrumental setup of
            the mock observation, appended to the output_dir name
            if it was not previously specified.
        nvector : int
            Number of particles to be projected simultaneously.
        ncpu : int
            Number of cores for parallel execution.
        overwrite : bool
            Allows overwriting an existing file when saving the output.
        simulation_id : str
            Keyword to be stored in the FITS header as the parent
            simulation identification.
        snapshot_id : str
            Keyword to be stored in the FITS header as the snapshot
            identification.
        reference_id : str
            Keyword to be stored in the FITS header as the reference
            frame identification for geometrical transformations.
        nfft : int
            Number of different scales of particles.
        fft_hsml_min : astropy.units.core.Unit
            Minimum size of particles in (pc).
        fft_scales : astropy.units.core.Unit
            Path of the file that lists the different scales used to
            pack the particles, in (kpc).
        spatial_convolution : str
            Keyword to store the method by which the spatial convolution
            will be performed.
            Options:
            *  spatial_astropy (default): Convolution using the Astropy
               library.
            *  spatial_astorpy_fft : Convolution using Fast Fourier
               transform from the Astropy library.
            *  spatial_aurora_fft : Convolution using Fast Fourier
               transform.
        spectral_convolution : str
            Keyword to store the method by which the spectral convolution
            will be performed.
            Options:
            *  analytical (default): Analytical convolution between the
               Gaussian emission lines and the Gaussian kernel of the LSF.
            *  spectral_astropy : Convolution using the Astropy library.
            *  spectral_astorpy_fft : Convolution using Fast Fourier
               transform from the Astropy library.
            *  spectral_aurora_fft : Convolution using Fast Fourier
               transform.
        """

        run_config = configparser.SafeConfigParser({}, allow_no_value=True)
        run_config.read(ConfigFile)

        self.input_file = read_var(run_config, "run", "input_file", str)
        self.output_file = read_var(run_config, "run", "output_file", str)
        self.output_dir = read_var(run_config, "run", "custom_dir", str)
        self.instrument = read_var(run_config, "run", "instrument", str)
        self.nvector = read_var(run_config, "run", "nvector", int)
        self.ncpu = read_var(run_config, "run", "ncpu", int)
        self.overwrite = read_var(run_config, "run", "overwrite", bool)
        self.simulation_id = read_var(run_config, "run", "simulation_id", str)
        self.snapshot_id = read_var(run_config, "run", "snapshot_id", str)
        self.reference_id = read_var(run_config, "run", "reference_id", str)
        self.nfft = read_var(run_config, "run", "nfft", int)
        self.fft_hsml_min = read_var(run_config, "run", "fft_hsml_min", float,
                                     unit.pc)
        self.fft_scales = read_var(run_config, "run", "fft_scales", str)
        self.spatial_convolution = read_var(run_config, "run",
                                            "spatial_convolution", str)
        self.spectral_convolution = read_var(run_config, "run",
                                             "spectral_convolution", str)
Example #21
0
    def get_search_config(self):
        self.config = configparser.SafeConfigParser()
        self.config.read(self.config_file_path)
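get_search_config() never checks whether the file was actually found; configparser's read() silently skips missing paths and returns only the ones it parsed, so its return value is the cheapest existence check:

import configparser

config = configparser.ConfigParser()
parsed = config.read('/no/such/search.ini')
if not parsed:
    # empty list means no file was read; fall back to defaults
    print('config file missing, using defaults')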
Example #22
0
    def parse_input(self, ConfigFile):
        """
        Extract parameters from the section [geometry] of the ConfigFile.
        
        Parameters
        ----------
        ConfigFile : str
            Loaded configuration file.
        
        Returns
        -------
        redshift : float
            Redshift where the galaxy is located.
        dl : astropy.units.core.Unit 
            Luminosity distance where the galaxy is located in (Mpc)
        dist_angular : astropy.units.core.Unit
            Angular diameter distance in (Mpc).
        lambda_obs : astropy.units.core.Unit
            Central observed wavelength in (angstrom).
        theta : astropy.units.core.Unit
            Orientation angle of the major axis of the projected galaxy
            in (deg).
        phi : astropy.units.core.Unit
            Angle of inclination of the disc with respect to the line
            of sight in (deg).
        barycenter : bool
            Allows calculating the center of the galaxy based on
            the ssc centering scheme of astropy.
        centerx : astropy.units.core.Unit
            Center of the galaxy in the X axis in (kpc).
        centery : astropy.units.core.Unit
            Center of the galaxy in the Y axis in (kpc).
        centerz : astropy.units.core.Unit
            Center of the galaxy in the Z axis in (kpc).
        reference : str
            Path of the reference file.
        gas_minmax_keys : str
            Specific properties to filter the gas
            particles.
        gas_minmax_units : str
            Units of the specific properties to filter 
            the gas particles.
        gas_min_values : str
            Minimum boundary to filter the gas particles.
        gas_max_values : str
            Maximum boundary to filter the gas particles.
        """

        g_conf = configparser.SafeConfigParser(allow_no_value=True)
        g_conf.read(ConfigFile)

        self.redshift = read_var(g_conf, "geometry", "redshift", float)
        self.dl = read_var(g_conf, "geometry", "dist_lum", float, unit.Mpc)
        self.dist_angular = read_var(g_conf, "geometry", "dist_angular", float,
                                     unit.Mpc)
        self.lambda_obs = read_var(g_conf, "geometry", "lambda_obs", float,
                                   unit.angstrom)
        self.theta = read_var(g_conf, "geometry", "theta", float, unit.deg)
        self.phi = read_var(g_conf, "geometry", "phi", float, unit.deg)
        self.barycenter = read_var(g_conf, "geometry", "barycenter", bool)
        self.centerx = read_var(g_conf, "geometry", "centerx", float, unit.kpc)
        self.centery = read_var(g_conf, "geometry", "centery", float, unit.kpc)
        self.centerz = read_var(g_conf, "geometry", "centerz", float, unit.kpc)
        self.reference = read_var(g_conf, "geometry", "reference_frame", str)

        self.gas_minmax_keys = read_var(g_conf, "geometry", "gas_minmax_keys",
                                        str)
        self.gas_minmax_units = read_var(g_conf, "geometry",
                                         "gas_minmax_units", str)
        self.gas_min_values = read_var(g_conf, "geometry", "gas_min_values",
                                       str)
        self.gas_max_values = read_var(g_conf, "geometry", "gas_max_values",
                                       str)

        # Filter the gas particles according to the specified properties and boundaries (if any)

        if (self.gas_minmax_keys != ""):
            self.gas_minmax_keys = re.split(
                ",|;", "".join(self.gas_minmax_keys.split()))
        if (self.gas_min_values != ""):
            # np.float was removed from NumPy; the builtin float is equivalent
            self.gas_min_values = (np.array(
                re.split(",|;", "".join(self.gas_min_values.split())))).astype(
                    float)
            if (len(self.gas_minmax_keys) != len(self.gas_min_values)):
                logging.error(
                    "The number of elements in gas_minmax_keys and gas_min_values should be equal"
                )
                sys.exit()
        if (self.gas_max_values != ""):
            self.gas_max_values = (np.array(
                re.split(",|;", "".join(self.gas_max_values.split())))).astype(
                    float)
            if (len(self.gas_minmax_keys) != len(self.gas_max_values)):
                logging.error(
                    "The number of elements in gas_minmax_keys and gas_max_values should be equal"
                )
                sys.exit()
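The boundary parsing above first strips all whitespace with "".join(s.split()) and then splits on either comma or semicolon. A self-contained rerun with sample values:

import re
import numpy as np

keys = re.split(",|;", "".join(" temp, rho ; nh ".split()))
print(keys)    # ['temp', 'rho', 'nh']

vals = np.array(re.split(",|;", "".join("1e4, 0.1; 3".split()))).astype(float)
print(vals)    # [1.e+04 1.e-01 3.e+00]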
Example #23
0
    def cleaned_config(self):
        shutil.copy(self.configpath, self.cleanpath)
        tmpconfig = configparser.SafeConfigParser()
        tmpconfig.readfp(codecs.open(self.cleanpath, 'r', 'utf8'))
        cleaned_list = {
            ('Interface', 'http_password'), ('SABnzbd', 'sab_username'),
            ('SABnzbd', 'sab_password'), ('SABnzbd', 'sab_apikey'),
            ('NZBGet', 'nzbget_username'), ('NZBGet', 'nzbget_password'),
            ('NZBsu', 'nzbsu_apikey'), ('DOGnzb', 'dognzb_apikey'),
            ('uTorrent', 'utorrent_username'),
            ('uTorrent', 'utorrent_password'),
            ('Transmission', 'transmission_username'),
            ('Transmission', 'transmission_password'),
            ('Deluge', 'deluge_username'), ('Deluge', 'deluge_password'),
            ('qBittorrent', 'qbittorrent_username'),
            ('qBittorrent', 'qbittorrent_password'),
            ('Rtorrent', 'rtorrent_username'),
            ('Rtorrent', 'rtorrent_password'), ('Prowl', 'prowl_keys'),
            ('PUSHOVER', 'pushover_apikey'), ('PUSHOVER', 'pushover_userkey'),
            ('BOXCAR', 'boxcar_token'), ('PUSHBULLET', 'pushbullet_apikey'),
            ('NMA', 'nma_apikey'), ('TELEGRAM', 'telegram_token'),
            ('CV', 'comicvine_api'), ('32P', 'password_32p'),
            ('32P', 'passkey_32p'), ('32P', 'username_32p'),
            ('32P', 'rssfeed_32p'), ('Seedbox', 'seedbox_user'),
            ('Seedbox', 'seedbox_pass'), ('Seedbox', 'seedbox_port'),
            ('Tablet', 'tab_pass'), ('API', 'api_key'),
            ('OPDS', 'opds_password'), ('AutoSnatch', 'pp_sshpasswd'),
            ('AutoSnatch', 'pp_sshport'), ('Email', 'email_password'),
            ('Email', 'email_user')
        }

        for v in cleaned_list:
            try:
                if all([
                        tmpconfig.get(v[0], v[1]) is not None,
                        tmpconfig.get(v[0], v[1]) != 'None'
                ]):
                    tmpconfig.set(v[0], v[1], 'xXX[REMOVED]XXx')
            except configparser.NoSectionError:
                pass

        hostname_list = {('SABnzbd', 'sab_host'), ('NZBGet', 'nzbget_host'),
                         ('Torznab', 'torznab_host'),
                         ('uTorrent', 'utorrent_host'),
                         ('Transmission', 'transmission_host'),
                         ('Deluge', 'deluge_host'),
                         ('qBittorrent', 'qbittorrent_host'),
                         ('Interface', 'http_host'),
                         ('Rtorrent', 'rtorrent_host'),
                         ('AutoSnatch', 'pp_sshhost'), ('Tablet', 'tab_host'),
                         ('Seedbox', 'seedbox_host'),
                         ('Email', 'email_server')}

        for h in hostname_list:
            if all([
                    tmpconfig.get(h[0], h[1]) is not None,
                    tmpconfig.get(h[0], h[1]) != 'None'
            ]):
                tmpconfig.set(h[0], h[1], 'xXX[REMOVED]XXx')

        extra_newznabs = list(
            zip(*
                [iter(tmpconfig.get('Newznab', 'extra_newznabs').split(', '))
                 ] * 6))
        extra_torznabs = list(
            zip(*
                [iter(tmpconfig.get('Torznab', 'extra_torznabs').split(', '))
                 ] * 5))
        cleaned_newznabs = []
        cleaned_torznabs = []
        for ens in extra_newznabs:
            n_host = None
            n_uid = None
            n_api = None
            if ens[1] is not None:
                n_host = 'xXX[REMOVED]XXx'
            if ens[3] is not None:
                n_api = 'xXX[REMOVED]XXx'
            if ens[4] is not None:
                n_uid = 'xXX[REMOVED]XXx'
            newnewzline = (ens[0], n_host, ens[2], n_api, n_uid, ens[5])
            cleaned_newznabs.append(newnewzline)

        for ets in extra_torznabs:
            n_host = None
            n_uid = None
            n_api = None
            if ets[1] is not None:
                n_host = 'xXX[REMOVED]XXx'
            if ets[2] is not None:
                n_api = 'xXX[REMOVED]XXx'
            if ets[4] is not None:
                n_uid = 'xXX[REMOVED]XXx'
            newtorline = (ets[0], n_host, n_api, ets[3], ets[4])
            cleaned_torznabs.append(newtorline)

        tmpconfig.set('Newznab', 'extra_newznabs',
                      ', '.join(self.write_extras(cleaned_newznabs)))
        tmpconfig.set('Torznab', 'extra_torznabs',
                      ', '.join(self.write_extras(cleaned_torznabs)))
        try:
            with codecs.open(self.cleanpath, encoding='utf8',
                             mode='w+') as tmp_configfile:
                tmpconfig.write(tmp_configfile)
            logger.fdebug(
                'Configuration cleaned of keys/passwords and written to temporary location.'
            )
        except IOError as e:
            logger.warn("Error writing configuration file: %s" % e)
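
The redaction pattern above is easy to try in isolation. Below is a minimal sketch with invented section/option names, using the plain ConfigParser spelling (SafeConfigParser is a deprecated alias for it in Python 3):

import configparser

config = configparser.ConfigParser()
config.read_string("[API]\napi_key = s3cret\nhost = example.org\n")

secrets = {('API', 'api_key')}  # hypothetical list of sensitive options
for section, option in secrets:
    try:
        if config.get(section, option) not in (None, 'None'):
            config.set(section, option, 'xXX[REMOVED]XXx')
    except configparser.NoSectionError:
        pass

with open('config.clean.ini', 'w') as fh:
    config.write(fh)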
Example #24
0
    def parse_input(self, ConfigFile):
        """
        Extract parameters from the section [spectrom] of the ConfigFile
        
        Parameters
        ----------
        ConfigFile : str
            Loaded configuration file.
        
        Returns
        -------
        presets : str
            Name of the instrument to mimic, some options are: sinfoni,
            eagle, ghasp, muse-wide, etc. See all options in presets.py.
        spatial_sampl : astropy.units.core.Unit 
            Pixel size of the instrument in (arcsec).
        spectral_sampl : astropy.units.core.Unit
            Spectral sampling of the instrument in (angstrom).
        spatial_res : astropy.units.core.Unit
            Spatial resolution of the instrument in (arcsec).
        spectral_res : float
            Spectral resolution of the instrument.
        spatial_dim : int
            Number of pixels per side of the field of view.
        spectral_dim : int
            Number of spectral channels of the instrument.
        sigma_cont : float
            Target signal to noise ratio of the instrument.
        redshift_ref : float
            Reference redshift used to calculate the fraction
            of ionized hydrogen, following the procedure of
            Rahmati et al. (2012). See more info in rahmati.py.
        pixsize : astropy.units.core.Unit
            Pixel size of the instrument in (pc).
        velocity_sampl : astropy.units.core.Unit
            Spectral sampling of the instrument in velocity units
            (km s-1).
        fieldofview : astropy.units.core.Unit
            Size of one side of the square field of view of the
            instrument in (kpc).
        FoV_arcsec : astropy.units.core.Unit
            Size of one side of the square field of view of the
            instrument in (arcsec).
        velocity_range : astropy.units.core.Unit
            Spectral range of the instrument in velocity units
            (km s-1).
        spectral_range : astropy.units.core.Unit
            Spectral range of the instrument in (angstrom).
        spatial_res_kpc : astropy.units.core.Unit
            Spatial resolution of the instrument in (kpc).
        kernel_scale : float
            Constant that applies additional smoothing to the projected
            luminosity of the particles, especially for RAMSES-type
            simulations.
        oversampling : int
            Factor by which the pixel size is oversampled when the
            convolution is carried out, to minimize numerical errors.
        lum_dens_rel : str
            Ion number-density dependence used to calculate the H-alpha
            emission. The options are: square (default), linear or
            root. See more info in emitters.py.
        density_threshold : str
            Density threshold above which the H-alpha emission of a
            gas particle is modified. See more info in emitters.py.
        equivalent_luminosity : str
            Equivalent luminosity that replaces the H-alpha emission
            for gas particles that exceed the established density
            threshold. See more info in emitters.py.
        """

        spec_conf = configparser.SafeConfigParser(allow_no_value=True)
        spec_conf.read(ConfigFile)
        self.presets = read_var(spec_conf, "spectrom", "presets", str)

        if self.presets in presets.Instruments.keys():
            self.spatial_sampl = presets.Instruments[
                self.presets]["spatial_sampl"]
            self.spatial_sampl = float(self.spatial_sampl) * unit.arcsec
            self.spectral_sampl = presets.Instruments[
                self.presets]["spectral_sampl"]
            self.spectral_sampl = float(self.spectral_sampl) * unit.angstrom
            self.spatial_res = presets.Instruments[self.presets]["spatial_res"]
            self.spatial_res = float(self.spatial_res) * unit.arcsec
            self.spectral_res = presets.Instruments[
                self.presets]["spectral_res"]
            self.spectral_res = float(self.spectral_res)
            self.spatial_dim = presets.Instruments[self.presets]["spatial_dim"]
            self.spatial_dim = int(self.spatial_dim)
            self.spectral_dim = presets.Instruments[
                self.presets]["spectral_dim"]
            self.spectral_dim = int(self.spectral_dim)
            self.sigma_cont = presets.Instruments[self.presets]["target_snr"]
            self.sigma_cont = float(self.sigma_cont)
        else:
            self.spatial_sampl = read_var(spec_conf, "spectrom",
                                          "spatial_sampl", float, unit.arcsec)
            self.spectral_sampl = read_var(spec_conf, "spectrom",
                                           "spectral_sampl", float,
                                           unit.angstrom)
            self.spatial_res = read_var(spec_conf, "spectrom", "spatial_res",
                                        float, unit.arcsec)
            self.spectral_res = read_var(spec_conf, "spectrom", "spectral_res",
                                         float)
            self.spatial_dim = read_var(spec_conf, "spectrom", "spatial_dim",
                                        int)
            self.spectral_dim = read_var(spec_conf, "spectrom", "spectral_dim",
                                         int)
            self.sigma_cont = read_var(spec_conf, "spectrom", "sigma_cont",
                                       float)

        self.redshift_ref = read_var(spec_conf, "spectrom", "redshift_ref",
                                     float)
        self.pixsize = read_var(spec_conf, "spectrom", "pixsize", float,
                                unit.pc)
        self.velocity_sampl = read_var(spec_conf, "spectrom", "velocity_sampl",
                                       float, unit.km / unit.s)
        self.fieldofview = read_var(spec_conf, "spectrom", "fieldofview",
                                    float, unit.kpc)
        self.FoV_arcsec = read_var(spec_conf, "spectrom", "FoV_arsec", float,
                                   unit.arcsec)
        self.velocity_range = read_var(spec_conf, "spectrom", "velocity_range",
                                       float, unit.km / unit.s)
        self.spectral_range = read_var(spec_conf, "spectrom", "spectral_range",
                                       float, unit.angstrom)
        self.spatial_res_kpc = read_var(spec_conf, "spectrom",
                                        "spatial_res_kpc", float, unit.kpc)
        self.kernel_scale = read_var(spec_conf, "spectrom", "kernel_scale",
                                     float)
        self.oversampling = read_var(spec_conf, "spectrom", "oversampling",
                                     int)
        self.lum_dens_rel = read_var(spec_conf, "spectrom",
                                     "lum_dens_relation", str)
        self.density_threshold = read_var(spec_conf, "spectrom",
                                          "density_threshold", str)
        self.equivalent_luminosity = read_var(spec_conf, "spectrom",
                                              "equivalent_luminosity", str)
Example #25
0
    def __init__(self, directory, filename, logger=None):

        # Setup logging
        if logger:
            log = logger
        else:
            log = logging.getLogger(__name__)

        # Setup encoding to avoid UTF-8 errors
        if sys.version[0] == '2':
            SYS_ENCODING = None
            try:
                locale.setlocale(locale.LC_ALL, "")
                SYS_ENCODING = locale.getpreferredencoding()
            except (locale.Error, IOError):
                pass

            # For OSes that are poorly configured just force UTF-8
            if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968',
                                                    'US-ASCII', 'ASCII'):
                SYS_ENCODING = 'UTF-8'

            if not hasattr(sys, "setdefaultencoding"):
                reload(sys)

            try:
                # pylint: disable=E1101
                # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
                sys.setdefaultencoding(SYS_ENCODING)
            except:
                log.exception(
                    "Sorry, your environment is not set up correctly for utf-8 support. Please fix your setup and try again"
                )
                sys.exit(
                    "Sorry, your environment is not set up correctly for utf-8 support. Please fix your setup and try again"
                )

        # Default settings for SickBeard
        sb_defaults = {
            'host': 'localhost',
            'port': '8081',
            'ssl': "False",
            'api_key': '',
            'web_root': '',
            'username': '',
            'password': ''
        }
        # Default MP4 conversion settings
        mp4_defaults = {
            'ffmpeg': 'ffmpeg.exe',
            'ffprobe': 'ffprobe.exe',
            'threads': 'auto',
            'output_directory': '',
            'copy_to': '',
            'move_to': '',
            'output_extension': 'mp4',
            'output_format': 'mp4',
            'delete_original': 'True',
            'relocate_moov': 'True',
            'ios-audio': 'True',
            'ios-first-track-only': 'False',
            'max-audio-channels': '',
            'audio-language': '',
            'audio-default-language': '',
            'audio-codec': 'ac3',
            'audio-channel-bitrate': '256',
            'video-codec': 'h264, x264',
            'video-bitrate': '',
            'video-max-width': '',
            'h264-max-level': '',
            'subtitle-codec': 'mov_text',
            'subtitle-language': '',
            'subtitle-default-language': '',
            'subtitle-encoding': 'utf-8',
            'convert-mp4': 'False',
            'fullpathguess': 'True',
            'tagfile': 'True',
            'tag-language': 'en',
            'download-artwork': 'poster',
            'download-subs': 'False',
            'embed-subs': 'True',
            'sub-providers': 'addic7ed, podnapisi, thesubdb, opensubtitles',
            'permissions': '777',
            'post-process': 'False',
            'pix-fmt': ''
        }
        # Default settings for CouchPotato
        cp_defaults = {
            'host': 'localhost',
            'port': '5050',
            'username': '',
            'password': '',
            'apikey': '',
            'delay': '65',
            'method': 'renamer',
            'delete_failed': 'False',
            'ssl': 'False',
            'web_root': ''
        }
        # Default settings for Sonarr
        sonarr_defaults = {
            'host': 'localhost',
            'port': '8989',
            'apikey': '',
            'ssl': 'False',
            'web_root': ''
        }
        # Default uTorrent settings
        utorrent_defaults = {
            'couchpotato-label': 'couchpotato',
            'sickbeard-label': 'sickbeard',
            'sickrage-label': 'sickrage',
            'sonarr-label': 'sonarr',
            'bypass-label': 'bypass',
            'convert': 'True',
            'webui': 'False',
            'action_before': 'stop',
            'action_after': 'removedata',
            'host': 'http://localhost:8080/',
            'username': '',
            'password': ''
        }
        # Default SAB settings
        sab_defaults = {
            'convert': 'True',
            'Sickbeard-category': 'sickbeard',
            'Sickrage-category': 'sickrage',
            'Couchpotato-category': 'couchpotato',
            'Sonarr-category': 'sonarr',
            'Bypass-category': 'bypass'
        }
        # Default Sickrage Settings
        sr_defaults = {
            'host': 'localhost',
            'port': '8081',
            'ssl': "False",
            'api_key': '',
            'web_root': '',
            'username': '',
            'password': ''
        }

        # Default deluge settings
        deluge_defaults = {
            'couchpotato-label': 'couchpotato',
            'sickbeard-label': 'sickbeard',
            'sickrage-label': 'sickrage',
            'sonarr-label': 'sonarr',
            'bypass-label': 'bypass',
            'convert': 'True',
            'host': 'localhost',
            'port': '58846',
            'username': '',
            'password': ''
        }

        # Default Plex Settings
        plex_defaults = {
            'host': 'localhost',
            'port': '32400',
            'refresh': 'true',
            'token': ''
        }

        defaults = {
            'SickBeard': sb_defaults,
            'CouchPotato': cp_defaults,
            'Sonarr': sonarr_defaults,
            'MP4': mp4_defaults,
            'uTorrent': utorrent_defaults,
            'SABNZBD': sab_defaults,
            'Sickrage': sr_defaults,
            'Deluge': deluge_defaults,
            'Plex': plex_defaults
        }
        write = False  # Will be changed to true if a value is missing from the config file and needs to be written

        config = configparser.SafeConfigParser()
        configFile = os.path.join(directory, filename)
        if os.path.isfile(configFile):
            config.read(configFile)
        else:
            log.error("Config file not found, creating %s." % configFile)
            # config.filename = filename
            write = True

        # Make sure all sections and all keys for each section are present
        for s in defaults:
            if not config.has_section(s):
                config.add_section(s)
                write = True
            for k in defaults[s]:
                if not config.has_option(s, k):
                    config.set(s, k, defaults[s][k])
                    write = True

        # If any keys are missing from the config file, write them
        if write:
            self.writeConfig(config, configFile)

        # Read relevant MP4 section information
        section = "MP4"
        self.ffmpeg = os.path.normpath(self.raw(config.get(
            section, "ffmpeg")))  # Location of FFMPEG.exe
        self.ffprobe = os.path.normpath(
            self.raw(config.get(section,
                                "ffprobe")))  # Location of FFPROBE.exe
        self.threads = config.get(section,
                                  "threads")  # Number of FFMPEG threads
        try:
            if int(self.threads) < 1:
                self.threads = "auto"
        except:
            self.threads = "auto"

        self.output_dir = config.get(section, "output_directory")
        if self.output_dir == '':
            self.output_dir = None
        else:
            self.output_dir = os.path.normpath(self.raw(
                self.output_dir))  # Output directory
        self.copyto = config.get(
            section,
            "copy_to")  # Directories to make copies of the final product
        if self.copyto == '':
            self.copyto = None
        else:
            self.copyto = self.copyto.split('|')
            for i in range(len(self.copyto)):
                self.copyto[i] = os.path.normpath(self.copyto[i])
                if not os.path.isdir(self.copyto[i]):
                    try:
                        os.makedirs(self.copyto[i])
                    except:
                        log.exception("Error making directory %s." %
                                      (self.copyto[i]))
        self.moveto = config.get(
            section, "move_to")  # Directory to move final product to
        if self.moveto == '':
            self.moveto = None
        else:
            self.moveto = os.path.normpath(self.moveto)
            if not os.path.isdir(self.moveto):
                try:
                    os.makedirs(self.moveto)
                except:
                    log.exception("Error making directory %s." % (self.moveto))
                    self.moveto = None

        self.output_extension = config.get(
            section, "output_extension")  # Output extension
        self.output_format = config.get(section,
                                        "output_format")  # Output format
        if self.output_format not in valid_formats:
            self.output_format = 'mov'
        self.delete = config.getboolean(
            section, "delete_original")  # Delete original file
        self.relocate_moov = config.getboolean(
            section, "relocate_moov")  # Relocate MOOV atom to start of file
        if self.relocate_moov:
            try:
                import qtfaststart
            except:
                log.error(
                    "Please install QTFastStart via PIP, relocate_moov will be disabled without this module."
                )
                self.relocate_moov = False
        self.acodec = config.get(section, "audio-codec").lower(
        )  # Desired audio codec; if no valid codec is selected, default to AC3
        if self.acodec == '':
            self.acodec = ['ac3']
        else:
            self.acodec = self.acodec.lower().replace(' ', '').split(',')

        self.abitrate = config.get(section, "audio-channel-bitrate")
        try:
            self.abitrate = int(self.abitrate)
        except:
            self.abitrate = 256
            log.warning(
                "Audio bitrate was invalid, defaulting to 256 per channel.")
        if self.abitrate > 256:
            log.warning(
                "Audio bitrate >256 may create errors with common codecs.")

        self.iOS = config.get(
            section, "ios-audio"
        )  # Creates a second audio channel if the standard output methods are different from this for iOS compatibility
        if self.iOS == "" or self.iOS.lower() in ['false', 'no', 'f', '0']:
            self.iOS = False
        else:
            if self.iOS.lower() in ['true', 'yes', 't', '1']:
                self.iOS = 'aac'
        self.iOSFirst = config.getboolean(
            section, "ios-first-track-only"
        )  # Enables the iOS audio option only for the first track

        self.downloadsubs = config.getboolean(
            section, "download-subs"
        )  # Enables downloading of subtitles from the internet sources using subliminal
        if self.downloadsubs:
            try:
                import subliminal
            except Exception as e:
                self.downloadsubs = False
                log.exception(
                    "Subliminal is not installed, automatic downloading of subs has been disabled."
                )
        self.subproviders = config.get(section, 'sub-providers').lower()
        if self.subproviders == '':
            self.downloadsubs = False
            log.warning(
                "You must specify at least one subtitle provider to download subs automatically, subtitle downloading disabled."
            )
        else:
            self.subproviders = self.subproviders.lower().replace(
                ' ', '').split(',')

        self.embedsubs = config.getboolean(section, 'embed-subs')

        self.permissions = config.get(section, 'permissions')
        try:
            self.permissions = int(self.permissions, 8)
        except:
            log.exception("Invalid permissions, defaulting to 777.")
            self.permissions = int("0777", 8)

        try:
            self.postprocess = config.getboolean(section, 'post-process')
        except:
            self.postprocess = False

        # Setup variable for maximum audio channels
        self.maxchannels = config.get(section, 'max-audio-channels')
        if self.maxchannels == "":
            self.maxchannels = None
        else:
            try:
                self.maxchannels = int(self.maxchannels)
            except:
                log.exception("Invalid number of audio channels specified.")
                self.maxchannels = None
        if self.maxchannels is not None and self.maxchannels < 1:
            log.warning("Must have at least 1 audio channel.")
            self.maxchannels = None

        self.vcodec = config.get(section, "video-codec")
        if self.vcodec == '':
            self.vcodec = ['h264', 'x264']
        else:
            self.vcodec = self.vcodec.lower().replace(' ', '').split(',')

        self.vbitrate = config.get(section, "video-bitrate")
        if self.vbitrate == '':
            self.vbitrate = None
        else:
            try:
                self.vbitrate = int(self.vbitrate)
                if not (self.vbitrate > 0):
                    self.vbitrate = None
                    log.warning(
                        "Video bitrate must be greater than 0, defaulting to no video bitrate cap."
                    )
            except:
                log.exception(
                    "Invalid video bitrate, defaulting to no video bitrate cap."
                )
                self.vbitrate = None

        self.vwidth = config.get(section, "video-max-width")
        if self.vwidth == '':
            self.vwidth = None
        else:
            try:
                self.vwidth = int(self.vwidth)
            except:
                log.exception("Invalid video width, defaulting to none.")
                self.vwidth = None

        self.h264_level = config.get(section, "h264-max-level")
        if self.h264_level == '':
            self.h264_level = None
        else:
            try:
                self.h264_level = float(self.h264_level)
            except:
                log.exception("Invalid h264 level, defaulting to none.")
                self.h264_level = None

        self.pix_fmt = config.get(section, "pix-fmt").strip().lower()
        if self.pix_fmt == '':
            self.pix_fmt = None

        self.awl = config.get(section, 'audio-language').strip().lower(
        )  # List of acceptable languages for audio streams to be carried over from the original file, separated by a comma. Blank for all
        if self.awl == '':
            self.awl = None
        else:
            self.awl = self.awl.replace(' ', '').split(',')

        self.scodec = config.get(section, 'subtitle-codec').strip().lower()
        if not self.scodec or self.scodec == "":
            if self.embedsubs:
                self.scodec = 'mov_text'
            else:
                self.scodec = 'srt'
            log.warning("Invalid subtitle codec, defaulting to '%s'." %
                        self.scodec)

        if self.embedsubs and self.scodec not in valid_internal_subcodecs:
            log.warning(
                "Invalid interal subtitle codec %s, defaulting to 'mov_text'."
                % self.scodec)
            self.scodec = 'mov_text'

        if not self.embedsubs and self.scodec not in valid_external_subcodecs:
            log.warning(
                "Invalid external subtitle codec %s, defaulting to 'srt'." %
                self.scodec)
            self.scodec = 'srt'

        self.swl = config.get(section, 'subtitle-language').strip().lower(
        )  # List of acceptable languages for subtitle streams to be carried over from the original file, separated by a comma. Blank for all
        if self.swl == '':
            self.swl = None
        else:
            self.swl = self.swl.replace(' ', '').split(',')

        self.subencoding = config.get(section,
                                      'subtitle-encoding').strip().lower()
        if self.subencoding == '':
            self.subencoding = None

        self.adl = config.get(section, 'audio-default-language').strip().lower(
        )  # What language to default an undefined audio language tag to. If blank, it will remain undefined. This is useful for single-language releases, which tend to leave things tagged as und
        if self.adl == "" or len(self.adl) > 3:
            self.adl = None

        self.sdl = config.get(section, 'subtitle-default-language').strip(
        ).lower(
        )  # What language to default an undefined subtitle language tag to. If blank, it will remain undefined. This is useful for single-language releases, which tend to leave things tagged as und
        if self.sdl == "" or len(self.sdl) > 3:
            self.sdl = None
        # Prevent incompatible combination of settings
        if self.output_dir is None and self.delete is False:
            log.error(
                "You must specify an alternate output directory if you aren't going to delete the original file."
            )
            sys.exit()
        # Create output directory if it does not exist
        if self.output_dir is not None:
            if not os.path.isdir(self.output_dir):
                os.makedirs(self.output_dir)
        self.processMP4 = config.getboolean(
            section, "convert-mp4"
        )  # Determine whether or not to reprocess mp4 files or just tag them
        self.fullpathguess = config.getboolean(
            section, "fullpathguess")  # Guess using the full path or not
        self.tagfile = config.getboolean(section,
                                         "tagfile")  # Tag files with metadata
        self.taglanguage = config.get(
            section, "tag-language").strip().lower()  # Language to tag files
        if len(self.taglanguage) > 2:
            try:
                babel = Language(self.taglanguage)
                self.taglanguage = babel.alpha2
            except:
                log.exception(
                    "Unable to set tag language, defaulting to English.")
                self.taglanguage = 'en'
        elif len(self.taglanguage) < 2:
            log.exception("Unable to set tag language, defaulting to English.")
            self.taglanguage = 'en'
        self.artwork = config.get(
            section, "download-artwork").lower()  # Download and embed artwork
        if self.artwork == "poster":
            self.artwork = True
            self.thumbnail = False
        elif self.artwork == "thumb" or self.artwork == "thumbnail":
            self.artwork = True
            self.thumbnail = True
        else:
            self.thumbnail = False
            try:
                self.artwork = config.getboolean(section, "download-artwork")
            except:
                self.artwork = True
                log.error(
                    "Invalid download-artwork value, defaulting to 'poster'.")

        # Read relevant CouchPotato section information
        section = "CouchPotato"
        self.CP = {}
        self.CP['host'] = config.get(section, "host")
        self.CP['port'] = config.get(section, "port")
        self.CP['username'] = config.get(section, "username")
        self.CP['password'] = config.get(section, "password")
        self.CP['apikey'] = config.get(section, "apikey")
        self.CP['delay'] = config.get(section, "delay")
        self.CP['method'] = config.get(section, "method")
        self.CP['web_root'] = config.get(section, "web_root")

        try:
            self.CP['delay'] = float(self.CP['delay'])
        except ValueError:
            self.CP['delay'] = 60
        try:
            self.CP['delete_failed'] = config.getboolean(
                section, "delete_failed")
        except (configparser.NoOptionError, ValueError):
            self.CP['delete_failed'] = False
        try:
            if config.getboolean(section, 'ssl'):
                self.CP['protocol'] = "https://"
            else:
                self.CP['protocol'] = "http://"
        except (configparser.NoOptionError, ValueError):
            self.CP['protocol'] = "http://"

        # Read relevant uTorrent section information
        section = "uTorrent"
        self.uTorrent = {}
        self.uTorrent['cp'] = config.get(section, "couchpotato-label").lower()
        self.uTorrent['sb'] = config.get(section, "sickbeard-label").lower()
        self.uTorrent['sr'] = config.get(section, "sickrage-label").lower()
        self.uTorrent['sonarr'] = config.get(section, "sonarr-label").lower()
        self.uTorrent['bypass'] = config.get(section, "bypass-label").lower()
        try:
            self.uTorrent['convert'] = config.getboolean(section, "convert")
        except:
            self.uTorrent['convert'] = False
        self.uTorrentWebUI = config.getboolean(section, "webui")
        self.uTorrentActionBefore = config.get(section,
                                               "action_before").lower()
        self.uTorrentActionAfter = config.get(section, "action_after").lower()
        self.uTorrentHost = config.get(section, "host").lower()
        self.uTorrentUsername = config.get(section, "username")
        self.uTorrentPassword = config.get(section, "password")

        # Read relevant Deluge section information
        section = "Deluge"
        self.deluge = {}
        self.deluge['cp'] = config.get(section, "couchpotato-label").lower()
        self.deluge['sb'] = config.get(section, "sickbeard-label").lower()
        self.deluge['sr'] = config.get(section, "sickrage-label").lower()
        self.deluge['sonarr'] = config.get(section, "sonarr-label").lower()
        self.deluge['bypass'] = config.get(section, "bypass-label").lower()
        try:
            self.deluge['convert'] = config.getboolean(section, "convert")
        except:
            self.deluge['convert'] = False
        self.deluge['host'] = config.get(section, "host").lower()
        self.deluge['port'] = config.get(section, "port")
        self.deluge['user'] = config.get(section, "username")
        self.deluge['pass'] = config.get(section, "password")

        # Read relevant Sonarr section information
        section = "Sonarr"
        self.Sonarr = {}
        self.Sonarr['host'] = config.get(section, "host")
        self.Sonarr['port'] = config.get(section, "port")
        self.Sonarr['apikey'] = config.get(section, "apikey")
        self.Sonarr['ssl'] = config.get(section, "ssl")
        self.Sonarr['web_root'] = config.get(section, "web_root")

        # Read Sickbeard section information
        section = "SickBeard"
        self.Sickbeard = {}
        self.Sickbeard['host'] = config.get(section, "host")  # Server Address
        self.Sickbeard['port'] = config.get(section, "port")  # Server Port
        self.Sickbeard['api_key'] = config.get(section,
                                               "api_key")  # Sickbeard API key
        self.Sickbeard['web_root'] = config.get(
            section, "web_root")  # Sickbeard webroot
        self.Sickbeard['ssl'] = config.getboolean(section, "ssl")  # SSL
        self.Sickbeard['user'] = config.get(section, "username")
        self.Sickbeard['pass'] = config.get(section, "password")

        # Read Sickrage section information
        section = "Sickrage"
        self.Sickrage = {}
        self.Sickrage['host'] = config.get(section, "host")  # Server Address
        self.Sickrage['port'] = config.get(section, "port")  # Server Port
        self.Sickrage['api_key'] = config.get(section,
                                              "api_key")  # Sickbeard API key
        self.Sickrage['web_root'] = config.get(section,
                                               "web_root")  # Sickbeard webroot
        self.Sickrage['ssl'] = config.getboolean(section, "ssl")  # SSL
        self.Sickrage['user'] = config.get(section, "username")
        self.Sickrage['pass'] = config.get(section, "password")

        # Read SAB section information
        section = "SABNZBD"
        self.SAB = {}
        try:
            self.SAB['convert'] = config.getboolean(section,
                                                    "convert")  # Convert
        except:
            self.SAB['convert'] = False
        self.SAB['cp'] = config.get(section, "Couchpotato-category").lower()
        self.SAB['sb'] = config.get(section, "Sickbeard-category").lower()
        self.SAB['sr'] = config.get(section, "Sickrage-category").lower()
        self.SAB['sonarr'] = config.get(section, "Sonarr-category").lower()
        self.SAB['bypass'] = config.get(section, "Bypass-category").lower()

        # Read Plex section information
        section = "Plex"
        self.Plex = {}
        self.Plex['host'] = config.get(section, "host")
        self.Plex['port'] = config.get(section, "port")
        try:
            self.Plex['refresh'] = config.getboolean(section, "refresh")
        except:
            self.Plex['refresh'] = False
        self.Plex['token'] = config.get(section, "token")
        if self.Plex['token'] == '':
            self.Plex['token'] = None

        # Pass the values on
        self.config = config
        self.configFile = configFile
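
The section/option back-fill loop near the top of this example is a reusable pattern: walk a dict of defaults, add whatever is missing, and remember whether anything changed so the file can be rewritten. A minimal sketch (names and the filename are illustrative, not from the example):

import configparser

def merge_defaults(config, defaults):
    """Add missing sections/options; return True if the config changed."""
    changed = False
    for section, options in defaults.items():
        if not config.has_section(section):
            config.add_section(section)
            changed = True
        for key, value in options.items():
            if not config.has_option(section, key):
                config.set(section, key, value)
                changed = True
    return changed

config = configparser.ConfigParser()
if merge_defaults(config, {'Plex': {'host': 'localhost', 'port': '32400'}}):
    with open('autoProcess.ini', 'w') as fh:  # hypothetical filename
        config.write(fh)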
Example #26
0
import os
try:  # Importing python3 style first.
    import configparser as ConfigParser
except ImportError:  # Fall back to python2.
    import ConfigParser

import lib.message as message

conf = ConfigParser.SafeConfigParser({
    'WORK_DIR': '/home/customizer',
    'LOCALES': 'C',
    'RESOLUTION': '800x600',
    'COMPRESSION': 'gzip',
    'VRAM': '256',
    'ISO': '',
    'DEB': '',
    'LABEL': 'default',
    'HOOK': '',
    'KERNEL': 'default',
    'PURGE_KERNEL': True,
})

if not os.path.isfile('/etc/customizer.conf'):
    message.warning('Configuration file does not exist',
                    '/etc/customizer.conf')

conf.read('/etc/customizer.conf')
message.info('Read Configuration file', '/etc/customizer.conf')
for section in ('preferences', 'saved'):
    if not conf.has_section(section):
        conf.add_section(section)  # assumed completion; the original example is cut off here
Example #27
0
    def _parse_args(self, args_str):
        '''
        Eg. python add_route_target.py --routing_instance_name mx1
                                       --router_asn 64512
                                       --api_server_ip 127.0.0.1
                                       --api_server_port 8082
                                       --api_server_use_ssl False
        '''

        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c",
                                 "--conf_file",
                                 help="Specify config file",
                                 metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'routing_instance_name':
            'default-domain:default-project:ip-fabric:__default__',
            'route_target_number': '45',
            'router_asn': '64513',
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'api_server_use_ssl': False,
            'admin_user': None,
            'admin_password': None,
            'admin_tenant_name': None
        }

        if args.conf_file:
            config = configparser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        parser.set_defaults(**defaults)

        parser.add_argument("--routing_instance_name",
                            help="Colon separated fully qualified name",
                            required=True)
        parser.add_argument("--route_target_number",
                            help="Route Target for MX interaction",
                            required=True)
        parser.add_argument("--router_asn",
                            help="AS Number the MX is in",
                            required=True)
        parser.add_argument("--api_server_ip",
                            help="IP address of api server",
                            required=True)
        parser.add_argument("--api_server_port",
                            help="Port of api server",
                            required=True)
        parser.add_argument("--api_server_use_ssl",
                            help="Use SSL to connect with API server")
        parser.add_argument("--admin_user",
                            help="Name of keystone admin user",
                            required=True)
        parser.add_argument("--admin_password",
                            help="Password of keystone admin user",
                            required=True)
        parser.add_argument("--admin_tenant_name",
                            help="Tenamt name for keystone admin user",
                            required=True)

        self._args = parser.parse_args(remaining_argv)
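
The two-stage argparse pattern above is worth isolating: a bare pre-parser that only knows --conf_file, config values folded in as parser defaults, then the real parser run on the leftover argv so CLI flags win. A minimal sketch under the same assumptions as the example, including its "DEFAULTS" section name:

import argparse
import configparser

def parse(argv):
    # Pass 1: only look for the config file; keep -h for the real parser
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", metavar="FILE")
    args, remaining = conf_parser.parse_known_args(argv)

    defaults = {"router_asn": "64513"}
    if args.conf_file:
        config = configparser.ConfigParser()
        config.read([args.conf_file])
        defaults.update(dict(config.items("DEFAULTS")))

    # Pass 2: full parser; explicit CLI flags override config-file values
    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.add_argument("--router_asn")
    parser.set_defaults(**defaults)
    return parser.parse_args(remaining)

print(parse(["--router_asn", "64512"]).router_asn)  # -> 64512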
Example #28
0
def _main():
    # Force all prints to go to stderr
    stdout = sys.stdout
    sys.stdout = sys.stderr
    # Import dependencies only needed by _main()
    import zipfile
    # Get args
    args = _parse_args()
    # Set verbose
    if args.verbose:
        global VERBOSE
        VERBOSE = args.verbose

    # Checks if user is trying to initialize
    if str(args.Files) == "['init']" and not os.path.isfile('init'):
        _init(args)

    if not os.path.isfile(args.config):
        config_init(args.config)

    # Make sure report is not a dir
    if args.json:
        if os.path.isdir(args.json):
            print('ERROR:', args.json, 'is a directory, a file is expected')
            return False

    # Parse the file list
    parsedlist = parseFileList(args.Files, recursive=args.recursive)

    # Unzip zip files if asked to
    if args.extractzips:
        for fname in parsedlist:
            if zipfile.is_zipfile(fname):
                unzip_dir = os.path.join('_tmp', os.path.basename(fname))
                z = zipfile.ZipFile(fname)
                if PY3:
                    args.password = bytes(args.password, 'utf-8')
                try:
                    z.extractall(path=unzip_dir, pwd=args.password)
                    for uzfile in z.namelist():
                        parsedlist.append(os.path.join(unzip_dir, uzfile))
                except RuntimeError as e:
                    print("ERROR: Failed to extract ", fname, ' - ', e, sep='')
                parsedlist.remove(fname)

    # Resume from report
    if args.resume:
        i = len(parsedlist)
        try:
            reportfile = codecs.open(args.json, 'r', 'utf-8')
        except Exception as e:
            print("ERROR: Could not open report file")
            exit(1)
        for line in reportfile:
            line = json.loads(line)
            for fname in line:
                if fname in parsedlist:
                    parsedlist.remove(fname)
        reportfile.close()
        i = i - len(parsedlist)
        if VERBOSE:
            print("Skipping", i, "files which are in the report already")

    # Do multiple runs if there are too many files
    filelists = []
    if len(parsedlist) > args.numberper:
        while len(parsedlist) > args.numberper:
            filelists.append(parsedlist[:args.numberper])
            parsedlist = parsedlist[args.numberper:]
    if parsedlist:
        filelists.append(parsedlist)

    for filelist in filelists:
        # Record start time for metadata
        starttime = str(datetime.datetime.now())

        # Run the multiscan
        results = multiscan(filelist, recursive=None, configfile=args.config)

        # We need to read in the config for the parseReports call
        Config = configparser.SafeConfigParser()
        Config.optionxform = str
        Config.read(args.config)
        config = _get_main_config(Config)
        # Make sure we have a group-types
        if "group-types" not in config:
            config["group-types"] = []
        elif not config["group-types"]:
            config["group-types"] = []

        # Add in script metadata
        endtime = str(datetime.datetime.now())

        # For windows compatibility
        try:
            username = os.getlogin()
        except:
            username = os.getenv('USERNAME')

        results.append((
            [],
            {
                "Name": "MultiScanner",
                "Start Time": starttime,
                "End Time": endtime,
                # "Command Line":list2cmdline(sys.argv),
                "Run by": username
            }))

        if args.show or not stdout.isatty():
            # TODO: Make this output something readable
            # Parse Results
            report = parse_reports(results,
                                   groups=config["group-types"],
                                   ugly=args.ugly,
                                   includeMetadata=args.metadata)

            # Print report
            try:
                print(convert_encoding(report,
                                       encoding='ascii',
                                       errors='replace'),
                      file=stdout)
                stdout.flush()
            except Exception as e:
                print('ERROR: Can\'t print report -', e)

        report = parse_reports(results,
                               groups=config["group-types"],
                               includeMetadata=args.metadata,
                               python=True)

        update_conf = None
        if args.json:
            update_conf = {'File': {'path': args.json}}
            if args.json.endswith('.gz') or args.json.endswith('.gzip'):
                update_conf['File']['gzip'] = True

        if 'storage-config' not in config:
            config["storage-config"] = None
        storage_handle = storage.StorageHandler(
            configfile=config["storage-config"], config=update_conf)
        storage_handle.store(report)
        storage_handle.close()

    # Cleanup zip extracted files
    if args.extractzips:
        shutil.rmtree('_tmp')
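
One small but important detail in this example is Config.optionxform = str: ConfigParser lower-cases option names by default, and that assignment disables the transformation so keys like "group-types" survive exactly as written. A quick demonstration:

import configparser

cp = configparser.ConfigParser()
cp.optionxform = str  # keep option-name case exactly as written
cp.read_string("[main]\nGroup-Types = hashes\n")
print(cp.options("main"))  # ['Group-Types'] rather than ['group-types']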
Example #29
0
        date = date.strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月',
                                                       d='日')  # format the date with Chinese 年/月/日 separators
        return date


if __name__ == '__main__':
    print("全部数据汇总表只能是xlsx格式")  # "The full summary workbook must be in xlsx format"
    excels_dir_path: str = input('请输入各县区表格所在目录路径:')  # "Enter the directory containing the county/district spreadsheets:"
    begin_date: str = input('请输入起始时间(如2021-4-13):')  # "Enter the start date (e.g. 2021-4-13):"
    end_date: str = input('请输入结束时间(如2021-5-13):')  # "Enter the end date (e.g. 2021-5-13):"

    report_filename: str = Util.generate_report_filename(begin_date, end_date)
    date: str = Util.generate_date_in_weekly_report(begin_date, end_date)

    # read the file paths from the configuration file
    cf = configparser.SafeConfigParser()
    with codecs.open('./config.ini', 'r', encoding='utf-8') as f:
        cf.readfp(f)
    secs = cf.sections()
    all_summary_path: str = cf.get('Util', 'all_summary_path')  # folder holding all the summary data
    word_template_path: str = cf.get('Util',
                                     'word_template_path')  # template file used to generate the report
    weekly_report: str = cf.get('Util', 'report')
    excel_weekly_summary_template_path: str = cf.get(
        'GenerateWeeklyReport', 'excel_weekly_summary_template_path')
    excel_weekly_summary_dir: str = cf.get('GenerateWeeklyReport',
                                           'excel_weekly_summary_dir')

    word_target_path: str = os.path.join(weekly_report,
                                         '督察主要业务数据' + date) + '.docx'  # "Inspection key business data" + date
    excel_weekly_summary_path: str = os.path.join(
        excel_weekly_summary_dir, report_filename)  # assumed completion; the original example is cut off here
Example #30
0
    def loadConfiguration(self,
                          confFilename,
                          isDaemon=False,
                          logFile=None,
                          workingDir='.'):
        # Reinitialize
        self.__init__()

        if not isDaemon:
            print("Reading configuration file [%s]" % (confFilename))
        parser = configparser.SafeConfigParser()
        parser.read(confFilename)
        if not isDaemon:
            print("Configuration file successfully read")

        if isDaemon:
            logFileDefault = os.path.abspath('/tmp/sensorbridge-neurio.log')
        else:
            logFileDefault = os.path.abspath('%s/sensorbridge-neurio.log' %
                                             (workingDir))

        if logFile is not None:
            self.configOpts['logFile'] = os.path.abspath(logFile)
        else:
            self.configOpts['logFile'] = self.parserGetWithDefault(
                parser, "global", "logFile", logFileDefault)

        # This is where logging gets set up
        self.configOpts['consoleLogLevel'] = self.parserGetWithDefault(
            parser, "global", "consoleLogLevel", "error").lower()
        self.configOpts['fileLogLevel'] = self.parserGetWithDefault(
            parser, "global", "fileLogLevel", "debug").lower()
        self.setupLogger(self.configOpts['logFile'],
                         self.configOpts['consoleLogLevel'],
                         self.configOpts['fileLogLevel'])

        self.logger.info(
            "---------------------------------------------------------------------------"
        )
        self.logger.info(
            "Logging startup at %s",
            datetime.datetime.utcnow().replace(
                tzinfo=datetime.timezone.utc).isoformat())

        # Get global options
        self.configOpts['sourceName'] = self.parserGetWithDefault(
            parser, "global", "sourceName", "sensorbridge-neurio")
        self.configOpts['locale'] = self.parserGetWithDefault(
            parser, "global", "locale", "house")
        self.configOpts['neurioName'] = self.parserGetWithDefault(
            parser, "global", "neurioName", "main_panel")
        self.configOpts['mqttBroker'] = self.parserGetWithDefault(
            parser, "global", "mqttServer", "localhost")
        self.configOpts['mqttPort'] = self.parserGetIntWithDefault(
            parser, "global", "mqttPort", 1883)
        self.configOpts['mqttUsername'] = self.parserGetWithDefault(
            parser, "global", "mqttUsername", None)
        self.configOpts['mqttPassword'] = self.parserGetWithDefault(
            parser, "global", "mqttPassword", None)
        self.configOpts[
            'mqttReconnectInterval'] = self.parserGetIntWithDefault(
                parser, "global", "mqttReconnectInterval", 10)
        self.configOpts['neurioAddress'] = self.parserGetWithDefault(
            parser, "global", "neurioAddress", None)
        self.configOpts['postInterval'] = self.parserGetIntWithDefault(
            parser, "global", "postInterval", 10)