Example #1
from ConfigParser import SafeConfigParser
import MySQLdb
# getvalues() and count_matches() are helpers assumed to be defined elsewhere
# in this module.


def tabulate_scores():

    keys = []
    parser = SafeConfigParser()
    parser.read('prez.ini')

    master_email = parser.get('prez', 'master_email')
    # temp make a different master email address for testing
    # master_email="*****@*****.**"
    db_host = parser.get('prez', 'db_host')
    db_username = parser.get('prez', 'db_username')
    db_password = parser.get('prez', 'db_password')
    db = parser.get('prez', 'db')

    con = MySQLdb.connect(db_host, db_username, db_password, db)

    with con:
        cur = con.cursor()
        cur.execute("SELECT count(*) FROM State_Rep")
        num_entries = cur.fetchone()
        # Create list variables
        state_dem_values, state_rep_values, state_win_values, pres_win_values = [], [], [], []
        state_dem_score, state_rep_score, state_win_score, pres_win_score = [], [], [], []
        master_state_dem_values, master_state_rep_values, master_state_win_values, master_pres_win_values = [], [], [], []

        getvalues(cur, state_dem_values, "State_Dem", "")
        getvalues(cur, state_rep_values, "State_Rep", "")
        getvalues(cur, state_win_values, "State_Win", "")
        getvalues(cur, pres_win_values, "Pres_Win", "")

        getvalues(cur, master_state_dem_values, "State_Dem", master_email)
        getvalues(cur, master_state_rep_values, "State_Rep", master_email)
        getvalues(cur, master_state_win_values, "State_Win", master_email)
        getvalues(cur, master_pres_win_values, "Pres_Win", master_email)

        tick = "'"
        comma = ", "
        # weights are: state_dem, state_rep, state_win, pres_win
        score_weight = [1, 1, 3, 5]
        score_key_create = "Email CHAR(64) PRIMARY KEY, Timestamp Datetime, State_Dem_Score INT, State_Rep_Score INT, State_Win_Score INT, Pres_Win_Score INT, Total_Score INT"
        # cur.execute("""CREATE TABLE if not exists Scores (%s)""" % score_key_create)

        score_key = "Email, Timestamp, State_Dem_Score, State_Rep_Score, State_Win_Score, Pres_Win_Score, Total_Score"
        # print "number of entries is", num_entries[0]
        for i in range(num_entries[0]
                       ):  #num_entries is a tuple with one entry so need [0]
            user_email = state_dem_values[i][0]  #email address of user
            if (state_rep_values[i][0] != user_email):
                print "ERROR State_Rep Email does not match DEM Email"
            if (state_win_values[i][0] != user_email):
                print "ERROR State_Win Email does not match DEM Email"
            if (pres_win_values[i][0] != user_email):
                print "ERROR Pres_Win Email does not match DEM Email"
            user_timestamp = state_dem_values[i][1]
            new_timestamp = user_timestamp.strftime('%Y-%m-%d %H:%M:%S')
            state_dem_matches = count_matches(master_state_dem_values[0],
                                              state_dem_values[i], 'NoSel')
            state_rep_matches = count_matches(master_state_rep_values[0],
                                              state_rep_values[i], 'NoSel')
            state_win_matches = count_matches(master_state_win_values[0],
                                              state_win_values[i], 'NoSel')
            pres_win_matches = count_matches(master_pres_win_values[0],
                                             pres_win_values[i], 'NoSel')
            score_weighted_sum = score_weight[0] * state_dem_matches
            score_weighted_sum += score_weight[1] * state_rep_matches
            score_weighted_sum += score_weight[2] * state_win_matches
            score_weighted_sum += score_weight[3] * pres_win_matches

            score_set = ("Email=" + tick + user_email + tick + comma +
                         "State_Dem_Score=" + str(state_dem_matches) + comma +
                         "State_Rep_Score=" + str(state_rep_matches) + comma +
                         "State_Win_Score=" + str(state_win_matches) + comma +
                         "Pres_Win_Score=" + str(pres_win_matches) + comma +
                         "Total_Score=" + str(score_weighted_sum) + comma +
                         "Timestamp=" + tick + new_timestamp + tick)

            # print score_set
            cur.execute(
                """INSERT INTO Scores SET %s ON DUPLICATE KEY UPDATE %s""" %
                (score_set, score_set))
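
The upsert above splices user data straight into the SQL text; a minimal sketch of the same statement using MySQLdb parameter binding (column names taken from the commented CREATE TABLE) might look like this:

            sql = ("INSERT INTO Scores (Email, Timestamp, State_Dem_Score, "
                   "State_Rep_Score, State_Win_Score, Pres_Win_Score, Total_Score) "
                   "VALUES (%s, %s, %s, %s, %s, %s, %s) "
                   "ON DUPLICATE KEY UPDATE Timestamp = VALUES(Timestamp), "
                   "State_Dem_Score = VALUES(State_Dem_Score), "
                   "State_Rep_Score = VALUES(State_Rep_Score), "
                   "State_Win_Score = VALUES(State_Win_Score), "
                   "Pres_Win_Score = VALUES(Pres_Win_Score), "
                   "Total_Score = VALUES(Total_Score)")
            # The driver quotes and escapes each value itself.
            cur.execute(sql, (user_email, new_timestamp, state_dem_matches,
                              state_rep_matches, state_win_matches,
                              pres_win_matches, score_weighted_sum))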
Example #2
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-

from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api.urlfetch import DownloadError

from library import logins3

from boto.ec2.connection import *

from ConfigParser import SafeConfigParser

parser = SafeConfigParser()
parser.read('simple.cfg')


class ACL_Aendern(webapp.RequestHandler):
    def post(self):
        # self.response.out.write('posted!')
        keyname = self.request.get('keyname')

        canned_acl = self.request.get('canned_acl')

        # Get the MD5 hash of the key that needs to be erased
        md5hash = self.request.get('md5hash')

        # Get the username
        username = users.get_current_user()
        if not username:
            # assumed completion: the original example is truncated here; a
            # typical App Engine handler redirects anonymous users to login
            self.redirect(users.create_login_url(self.request.uri))
Example #3
import datetime
import os
from ConfigParser import SafeConfigParser
# Project helpers (df, whf, WhfLog) and the error classes are assumed to be
# imported by the enclosing module.


def anal_assim_layer(cycleYYYYMMDDHH, fhr, action, config):
    """ Analysis and Assimilation layering
        Performs layering/combination of RAP/HRRR/MRMS
        data for a particular analysis and assimilation
        model cycle and forecast hour.

        Args:
            cycleYYYYMMDDHH (string): Analysis and assimilation
                                      model cycle date.
            fhr (string): Forecast hour of analysis and assimilation 
                          model cycle. Possible values are -2, -1, 0.
            action (string): Specifying which layering to do, given
                             possible available model data. Possible 
                             values are "RAP", "RAP_HRRR", and
                             "RAP_HRRR_MRMS".
            config (string) : Config file name
        Returns: 
            None: Performs specified layering to final input directory
                  used for WRF-Hydro.
    """

    # Determine specific layering route to take
    str_split = action.split("_")
    process = len(str_split)

    # Determine specific date/time information used for composing regridded
    # file paths. 
    yearCycle = int(cycleYYYYMMDDHH[0:4])
    monthCycle = int(cycleYYYYMMDDHH[4:6])
    dayCycle = int(cycleYYYYMMDDHH[6:8])
    hourCycle = int(cycleYYYYMMDDHH[8:10])
    fhr = int(fhr)
 
    dateCurrent = datetime.datetime.today()  
    cycleDate = datetime.datetime(year=yearCycle,month=monthCycle,day=dayCycle, \
                hour=hourCycle)
    validDate = cycleDate + datetime.timedelta(seconds=fhr*3600)
    fcstWindowDate = validDate + datetime.timedelta(seconds=-3*3600) # Used for 3-hr forecast

    # HRRR/RAP files necessary for fluxes and precipitation data.
    # Obtain analysis and assimilation configuration parameters.
    parser = SafeConfigParser()
    parser.read(config)
    out_dir = parser.get('layering','analysis_assimilation_output')
    tmp_dir = parser.get('layering','analysis_assimilation_tmp')
    qpe_parm_dir = parser.get('layering','qpe_combine_parm_dir')
    hrrr_ds_dir_3hr = parser.get('downscaling','HRRR_finished_output_dir')
    hrrr_ds_dir_0hr = parser.get('downscaling','HRRR_finished_output_dir_0hr')
    rap_ds_dir_3hr = parser.get('downscaling','RAP_finished_output_dir')
    rap_ds_dir_0hr = parser.get('downscaling','RAP_finished_output_dir_0hr')
    mrms_ds_dir = parser.get('regridding','MRMS_finished_output_dir')
    layer_exe = parser.get('exe','Analysis_Assimilation_layering')
    ncl_exec = parser.get('exe', 'ncl_exe')

    # in case it is first time, create the output dirs
    df.makeDirIfNeeded(out_dir)
    df.makeDirIfNeeded(tmp_dir)

    # Sanity checking
    try:
        whf.dir_exists(out_dir)
        whf.dir_exists(tmp_dir)
        whf.dir_exists(qpe_parm_dir)
        whf.dir_exists(hrrr_ds_dir_3hr)
        whf.dir_exists(hrrr_ds_dir_0hr)
        whf.dir_exists(rap_ds_dir_3hr)
        whf.dir_exists(rap_ds_dir_0hr)
        whf.dir_exists(mrms_ds_dir)
        whf.file_exists(layer_exe)
    except MissingFileError:
        WhfLog.error("Missing file during preliminary checking of Analysis Assimilation layering")
        raise
    

    # Establish final output directories to hold 'LDASIN' files used for
    # WRF-Hydro long-range forecasting. If the directory does not exist,
    # create it.
    out_path = out_dir + "/" + cycleDate.strftime("%Y%m%d%H")

    whf.mkdir_p(out_path)

    # Compose necessary file paths  
    hrrr0Path = hrrr_ds_dir_0hr + "/" + validDate.strftime("%Y%m%d%H") + \
                "/" + validDate.strftime("%Y%m%d%H") + "00.LDASIN_DOMAIN1.nc"
    hrrr3Path = hrrr_ds_dir_3hr + "/" + fcstWindowDate.strftime("%Y%m%d%H") + \
                "/" + validDate.strftime("%Y%m%d%H") + "00.LDASIN_DOMAIN1.nc"     
    rap0Path = rap_ds_dir_0hr + "/" + validDate.strftime("%Y%m%d%H") + \
                "/" + validDate.strftime("%Y%m%d%H") + "00.LDASIN_DOMAIN1.nc"
    rap3Path = rap_ds_dir_3hr + "/" + fcstWindowDate.strftime("%Y%m%d%H") + \
                "/" + validDate.strftime("%Y%m%d%H") + "00.LDASIN_DOMAIN1.nc"
    mrmsPath = mrms_ds_dir + "/" + validDate.strftime("%Y%m%d%H") + \
                "/" + validDate.strftime("%Y%m%d%H") + "00.LDASIN_DOMAIN1.nc"
    hrrrBiasPath = qpe_parm_dir + "/HRRR_CMC-CPC_bias-corr_m" + \
                   validDate.strftime("%m") + "_v8_wrf1km.grb2"
    hrrrWgtPath = qpe_parm_dir + "/HRRR_wgt_m" + \
                  validDate.strftime("%m") + "_v7_wrf1km.grb2"
    mrmsBiasPath = qpe_parm_dir + "/MRMS_radonly_CMC-CPC_bias-corr_m" + \
                   validDate.strftime("%m") + "_v8_wrf1km.grb2"
    mrmsWgtPath = qpe_parm_dir + "/MRMS_radonly_wgt_m" + \
                  validDate.strftime("%m") + "_v7_wrf1km.grb2"
    rapBiasPath = qpe_parm_dir + "/RAPD_CMC-CPC_bias-corr_m" + \
                  validDate.strftime("%m") + "_v8_wrf1km.grb2"
    rapWgtPath = qpe_parm_dir + "/RAPD_wgt_m" + \
                 validDate.strftime("%m") + "_v7_wrf1km.grb2"

    # Sanity checking on parameter data
    try:
        whf.file_exists(hrrrBiasPath)
        whf.file_exists(hrrrWgtPath)
        whf.file_exists(mrmsBiasPath)
        whf.file_exists(mrmsWgtPath)
        whf.file_exists(rapBiasPath)
        whf.file_exists(rapWgtPath) 
    except MissingFileError:
        WhfLog.error("Missing file encountered while checking parameter data for AA")
        raise


    # Compose output file paths
    LDASIN_path_tmp = tmp_dir + "/" + validDate.strftime('%Y%m%d%H') + "00.LDASIN_DOMAIN1_TMP.nc"
    LDASIN_path_final = out_path + "/" + validDate.strftime('%Y%m%d%H') + "00.LDASIN_DOMAIN1"
    # Perform layering/combining depending on processing path.
    if process == 1:    # RAP only
        WhfLog.info("Layering and Combining RAP only for cycle date: " + \
                     cycleDate.strftime("%Y%m%d%H") + " valid date: " + \
                     validDate.strftime("%Y%m%d%H"))
        # Check for existence of input files
        try:
            whf.file_exists(rap0Path)
            whf.file_exists(rap3Path)
        except MissingFileError :
            WhfLog.error("Missing RAP files for layering")
            raise
            
    elif process == 2:  # HRRR and RAP only 
        WhfLog.info("Layering and Combining RAP and HRRR for cycle date: " + \
                     cycleDate.strftime("%Y%m%d%H") + " valid date: " + \
                     validDate.strftime("%Y%m%d%H"))
        # Check for existence of input files
        try:
            whf.file_exists(rap0Path)
            whf.file_exists(rap3Path)
            whf.file_exists(hrrr0Path)
            whf.file_exists(hrrr3Path)
        except MissingFileError:
            WhfLog.error("Missing RAP or HRRR files for layering")
            raise
    elif process == 3:  # HRRR, RAP, and MRMS
        WhfLog.info("Layering and Combining RAP/HRRR/MRMS for cycle date: " + \
                     cycleDate.strftime("%Y%m%d%H") + " valid date: " + \
                     validDate.strftime("%Y%m%d%H"))
        # Check for existence of input files
        try:
            whf.file_exists(rap0Path)
            whf.file_exists(rap3Path)
            whf.file_exists(hrrr0Path)
            whf.file_exists(hrrr3Path)
            whf.file_exists(mrmsPath)
        except MissingFileError:
            WhfLog.error("Missing RAP or HRRR or MRMS files for layering")
            raise
           
    else:  # Error out
        WhfLog.error("Invalid input action selected, invalid layer combination provided in AA.")
        raise UnrecognizedCommandError

    hrrrB_param = "'hrrrBFile=" + '"' + hrrrBiasPath + '"' + "' "
    mrmsB_param = "'mrmsBFile=" + '"' + mrmsBiasPath + '"' + "' "
    rapB_param = "'rapBFile=" + '"' + rapBiasPath + '"' + "' "
    hrrrW_param = "'hrrrWFile=" + '"' + hrrrWgtPath + '"' + "' "
    mrmsW_param = "'mrmsWFile=" + '"' + mrmsWgtPath + '"' + "' "
    rapW_param = "'rapWFile=" + '"' + rapWgtPath + '"' + "' "
    hrrr0_param = "'hrrr0File=" + '"' + hrrr0Path + '"' + "' "
    hrrr3_param = "'hrrr3File=" + '"' + hrrr3Path + '"' + "' "
    rap0_param = "'rap0File=" + '"' + rap0Path + '"' + "' "
    rap3_param = "'rap3File=" + '"' + rap3Path + '"' + "' "
    mrms_param = "'mrmsFile=" + '"' + mrmsPath + '"' + "' "
    process_param = "'process=" + '"' + str(process) + '"' + "' "
    out_param = "'outPath=" + '"' + LDASIN_path_tmp + '"' + "' "
     
    cmd_params = hrrrB_param + mrmsB_param + rapB_param + \
                 hrrrW_param + mrmsW_param + rapW_param + \
                 hrrr0_param + hrrr3_param + rap0_param + rap3_param + \
                 mrms_param + process_param + out_param
    cmd = ncl_exec + " -Q " + cmd_params + " " + layer_exe
    status = os.system(cmd)

    if status != 0:
        WhfLog.error("Error in combinining NCL program")
        raise NCLError("NCL error encountered while combining in AA")
   
    # Double check to make sure file was created, delete temporary regridded file
    whf.file_exists(LDASIN_path_tmp)
    # Rename file to conform to WRF-Hydro expectations
    cmd = "mv " + LDASIN_path_tmp + " " + LDASIN_path_final
    status = os.system(cmd)
    if status != 0:
        WhfLog.error("Failure to rename " + LDASIN_path_tmp)
    try:
        whf.file_exists(LDASIN_path_final)
    except MissingFileError:
        WhfLog.error("Missing LDASIN_path_final file")
        raise
    cmd = "rm -rf " + LDASIN_path_tmp 
    status = os.system(cmd)
    if status != 0:
        WhfLog.error("Failure to remove " + LDASIN_path_tmp)
        raise SystemCommandError
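
For reference, the mv and rm shell-outs at the end of this example map directly onto the standard library; a minimal sketch using the same variable names:

    import os
    import shutil

    # Rename the temporary file to its final WRF-Hydro name, then clean up.
    shutil.move(LDASIN_path_tmp, LDASIN_path_final)
    if os.path.exists(LDASIN_path_tmp):
        os.remove(LDASIN_path_tmp)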
Example #4
 def load_config(self, config_file):
     self.config = SafeConfigParser()
     self.config.read(config_file)
     self.msg_types = [x.upper() for x in self.config.options('messages')]
     self.msg_types.sort()
Example #5
 def __init__(self, **kwargs):
     self.filename = kwargs.get('filename', '.gitsubtrees')
     self.parser = SafeConfigParser()
     self.subtrees = {}
Example #6
def ReadConfig(configfile):
    """
    Read the config file and set all the variables.
    """

    # Read config file
    cfg = SafeConfigParser()
    cfg.optionxform = str

    try:
        with codecs.open(autosub.CONFIGFILE, 'r', autosub.SYSENCODING) as f:
            cfg.readfp(f)
    except:
        WriteConfig()
        return

    # First we check whether the config has been upgraded
    if autosub.CONFIGVERSION < version.configversion:
        upgradeConfig(cfg, autosub.CONFIGVERSION, version.configversion)
    elif autosub.CONFIGVERSION > version.configversion:
        print "Config: ERROR! Config version higher then this version of AutoSub supports. Update AutoSub!"
        os._exit(1)
    section = 'config'
    if not cfg.has_section(section): cfg.add_section(section)
    if cfg.has_option(section, "configversion"):
        autosub.CONFIGVERSION = cfg.getint("config", "configversion")
    if cfg.has_option(section, "wantedfirst"):
        autosub.WANTEDFIRST = cfg.getboolean(section, "wantedfirst")
    if cfg.has_option(section, 'downloaddutch'):
        autosub.DOWNLOADDUTCH = cfg.getboolean(section, 'downloaddutch')
    if cfg.has_option(section, 'downloadeng'):
        autosub.DOWNLOADENG = cfg.getboolean(section, 'downloadeng')
    if cfg.has_option(section, "fallbacktoeng"):
        autosub.FALLBACKTOENG = cfg.getboolean(section, "fallbacktoeng")
    if cfg.has_option(section, "notifyen"):
        autosub.NOTIFYEN = cfg.getboolean(section, "notifyen")
    if cfg.has_option(section, "notifynl"):
        autosub.NOTIFYNL = cfg.getboolean(section, "notifynl")
    if cfg.has_option(section, "launchbrowser"):
        autosub.LAUNCHBROWSER = cfg.getboolean(section, "launchbrowser")
    if cfg.has_option(section, "skiphiddendirs"):
        autosub.SKIPHIDDENDIRS = cfg.getboolean(section, "skiphiddendirs")
    if cfg.has_option(section, "englishsubdelete"):
        autosub.ENGLISHSUBDELETE = cfg.getboolean(section, "englishsubdelete")
    if cfg.has_option(section, "podnapisi"):
        autosub.PODNAPISI = cfg.getboolean(section, "podnapisi")
    if cfg.has_option(section, "subscene"):
        autosub.SUBSCENE = cfg.getboolean(section, "subscene")
    if cfg.has_option(section, "addic7ed"):
        autosub.ADDIC7ED = cfg.getboolean(section, "addic7ed")
    if cfg.has_option(section, "hearingimpaired"):
        autosub.HI = cfg.getboolean(section, "hearingimpaired")
    if cfg.has_option(section, 'minmatchscore'):
        autosub.MINMATCHSCORE = cfg.getint(section, 'minmatchscore')
    if cfg.has_option(section, 'searchinterval'):
        autosub.SEARCHINTERVAL = cfg.getint(section, 'searchinterval')
    if cfg.has_option(section, "browserrefresh"):
        autosub.BROWSERREFRESH = cfg.getint(section, "browserrefresh")
    if cfg.has_option(section, "subeng"):
        autosub.SUBENG = cfg.get(section, "subeng")
    if cfg.has_option(section, "subnl"):
        autosub.SUBNL = cfg.get(section, "subnl")
    if cfg.has_option(section, "postprocesscmd"):
        autosub.POSTPROCESSCMD = cfg.get(section, "postprocesscmd")
    if cfg.has_option(section, "opensubtitlesuser"):
        autosub.OPENSUBTITLESUSER = cfg.get(section, "opensubtitlesuser")
    if cfg.has_option(section, "opensubtitlespasswd"):
        autosub.OPENSUBTITLESPASSWD = cfg.get(section, "opensubtitlespasswd")
    if cfg.has_option(section, "addic7eduser"):
        autosub.ADDIC7EDUSER = cfg.get(section, "addic7eduser")
    if cfg.has_option(section, "addic7edpasswd"):
        autosub.ADDIC7EDPASSWD = cfg.get(section, "addic7edpasswd")
    if cfg.has_option(section, "logfile"):
        autosub.LOGFILE = cfg.get(section, "logfile")
    if cfg.has_option(section, "subcoautosub"):
        autosub.SUBCODEC = cfg.get(section, "subcodec")
    if cfg.has_option(section, "rootpath"):
        autosub.SERIESPATH = cfg.get(section, "rootpath")
    if cfg.has_option(section, "seriespath"):
        autosub.SERIESPATH = cfg.get(section, "seriespath")
    if cfg.has_option(section, "skipstringnl"):
        autosub.SKIPSTRINGNL = cfg.get(section, "skipstringnl")
    if cfg.has_option(section, "skipstringen"):
        autosub.SKIPSTRINGEN = cfg.get(section, "skipstringen")
    if cfg.has_option(section, "skipfoldersnl"):
        autosub.SKIPFOLDERSNL = cfg.get(section, "skipfoldersnl")
    if cfg.has_option(section, "skipfolderSen"):
        autosub.SKIPFOLDERSEN = cfg.get(section, "skipfoldersen")

    # *******************
    # * Logfile Section *
    # *******************
    section = 'logfile'
    if not cfg.has_section(section): cfg.add_section(section)
    if cfg.has_option(section, "loglevel"):
        LogLevel = cfg.get(section, "loglevel").upper()
        if LogLevel == u'ERROR':
            autosub.LOGLEVEL = logging.ERROR
        elif LogLevel == u"WARNING":
            autosub.LOGLEVEL = logging.WARNING
        elif LogLevel == u"DEBUG":
            autosub.LOGLEVEL = logging.DEBUG
        elif LogLevel == u"INFO":
            autosub.LOGLEVEL = logging.INFO
        elif LogLevel == u"CRITICAL":
            autosub.LOGLEVEL = logging.CRITICAL

    if cfg.has_option(section, "loglevelconsole"):
        LogLevel = cfg.get(section, "loglevelconsole").upper()
        if LogLevel == u'ERROR':
            autosub.LOGLEVELCONSOLE = logging.ERROR
        elif LogLevel == u"WARNING":
            autosub.LOGLEVELCONSOLE = logging.WARNING
        elif LogLevel == u"DEBUG":
            autosub.LOGLEVELCONSOLE = logging.DEBUG
        elif LogLevel == u"INFO":
            autosub.LOGLEVELCONSOLE = logging.INFO
        elif LogLevel == u"CRITICAL":
            autosub.LOGLEVELCONSOLE = logging.CRITICAL

    if cfg.has_option(section, "logsize"):
        autosub.LOGSIZE = cfg.getint(section, "logsize")
    if cfg.has_option(section, "lognum"):
        autosub.LOGNUM = cfg.getint(section, "lognum")

    # ******************************
    # * Cherrypy Webserver Section *
    # ******************************
    section = 'webserver'
    if not cfg.has_section(section): cfg.add_section(section)

    if cfg.has_option(section, 'webserverip'):
        autosub.WEBSERVERIP = cfg.get(section, 'webserverip')
    if cfg.has_option(section, 'webserverport'):
        autosub.WEBSERVERPORT = int(cfg.get(section, 'webserverport'))
    if cfg.has_option(section, 'webroot'):
        autosub.WEBROOT = cfg.get(section, 'webroot')
    if cfg.has_option(section, 'username'):
        autosub.USERNAME = cfg.get(section, 'username')
    if cfg.has_option(section, 'password'):
        autosub.PASSWORD = cfg.get(section, 'password')

    # ********************
    # * SkipShow Section *
    # ********************
    section = 'skipshow'
    if not cfg.has_section(section): cfg.add_section(section)

    autosub.SKIPSHOWUPPER = {}
    autosub.SKIPSHOW = {}
    SkipShows = dict(cfg.items(section))
    #autosub.SKIPSHOW = dict(cfg.items('skipshow'))
    # The next lines convert the skipshow keys to uppercase and split the values into lists.
    # They also replace "~" with ":", which is necessary because the config parser treats ":" as a delimiter.
    for show in SkipShows:
        if re.match("^[0-9 ,.-]+$", SkipShows[show]):
            autosub.SKIPSHOW[show.replace('~', ':')] = SkipShows[show]
            autosub.SKIPSHOWUPPER[show.upper().replace('~', ':')] = [
                Item.strip() for Item in SkipShows[show].split(',')
            ]

    # ***********************
    # * Namemapping Section *
    # ***********************
    section = 'namemapping'
    if not cfg.has_section(section): cfg.add_section(section)

    NameMapping = dict(cfg.items(section))
    autosub.USERNAMEMAPPING = {}
    autosub.USERNAMEMAPPINGUPPER = {}
    for Name in NameMapping:
        if NameMapping[Name].isdigit():
            autosub.USERNAMEMAPPING[Name.replace('~', ':')] = NameMapping[Name]
            autosub.USERNAMEMAPPINGUPPER[Name.upper().replace('~', ':')] = [
                Item.strip() for Item in NameMapping[Name].split(',')
            ]

    # ********************************
    # * Addic7ed Namemapping Section *
    # ********************************
    section = 'addic7edmapping'
    if not cfg.has_section(section): cfg.add_section(section)

    NameMapping = dict(cfg.items(section))
    autosub.USERADDIC7EDMAPPING = {}
    for Name in NameMapping:
        if Name.isdigit() and NameMapping[Name].isdigit():
            autosub.USERADDIC7EDMAPPING[Name] = NameMapping[Name]

    # ******************
    # * Notify Section *
    # ******************
    section = 'notify'
    if not cfg.has_section(section): cfg.add_section(section)

    if cfg.has_option(section, 'notifymail'):
        autosub.NOTIFYMAIL = cfg.getboolean(section, 'notifymail')
    if cfg.has_option(section, 'mailsrv'):
        autosub.MAILSRV = cfg.get(section, 'mailsrv')
    if cfg.has_option(section, 'mailfromaddr'):
        autosub.MAILFROMADDR = cfg.get(section, 'mailfromaddr')
    if cfg.has_option(section, 'mailtoaddr'):
        autosub.MAILTOADDR = cfg.get(section, 'mailtoaddr')
    if cfg.has_option(section, 'mailusername'):
        autosub.MAILUSERNAME = cfg.get(section, 'mailusername')
    if cfg.has_option(section, 'mailpassword'):
        autosub.MAILPASSWORD = cfg.get(section, 'mailpassword')
    if cfg.has_option(section, 'mailsubject'):
        autosub.MAILSUBJECT = cfg.get(section, 'mailsubject')
    if cfg.has_option(section, 'mailencryption'):
        autosub.MAILENCRYPTION = cfg.get(section, 'mailencryption')
    if cfg.has_option(section, 'mailauth'):
        autosub.MAILAUTH = cfg.get(section, 'mailauth')
    if cfg.has_option(section, 'notifygrowl'):
        autosub.NOTIFYGROWL = cfg.getboolean(section, 'notifygrowl')
    if cfg.has_option(section, 'growlhost'):
        autosub.GROWLHOST = cfg.get(section, 'growlhost')
    if cfg.has_option(section, 'growlport'):
        autosub.GROWLPORT = cfg.get(section, 'growlport')
    if cfg.has_option(section, 'growlpass'):
        autosub.GROWLPASS = cfg.get(section, 'growlpass')
    if cfg.has_option(section, 'notifytwitter'):
        autosub.NOTIFYTWITTER = cfg.getboolean(section, 'notifytwitter')
    if cfg.has_option(section, 'twitterkey'):
        autosub.TWITTERKEY = cfg.get(section, 'twitterkey')
    if cfg.has_option(section, 'twittersecret'):
        autosub.TWITTERSECRET = cfg.get(section, 'twittersecret')
    if cfg.has_option(section, 'notifynma'):
        autosub.NOTIFYNMA = cfg.getboolean(section, 'notifynma')
    if cfg.has_option(section, 'nmaapi'):
        autosub.NMAAPI = cfg.get(section, 'nmaapi')
    if cfg.has_option(section, 'nmapriority'):
        autosub.NMAPRIORITY = cfg.getint(section, 'nmapriority')
    if cfg.has_option(section, 'notifyprowl'):
        autosub.NOTIFYPROWL = cfg.getboolean(section, 'notifyprowl')
    if cfg.has_option(section, 'prowlapi'):
        autosub.PROWLAPI = cfg.get(section, 'prowlapi')
    if cfg.has_option(section, 'prowlpriority'):
        autosub.PROWLPRIORITY = cfg.getint(section, 'prowlpriority')
    if cfg.has_option(section, 'notifypushalot'):
        autosub.NOTIFYPUSHALOT = cfg.getboolean(section, 'notifypushalot')
    if cfg.has_option(section, 'pushalotapi'):
        autosub.PUSHALOTAPI = cfg.get(section, 'pushalotapi')
    if cfg.has_option(section, 'notifypushbullet'):
        autosub.NOTIFYPUSHBULLET = cfg.getboolean(section, 'notifypushbullet')
    if cfg.has_option(section, 'pushbulletapi'):
        autosub.PUSHBULLETAPI = cfg.get(section, 'pushbulletapi')
    if cfg.has_option(section, 'notifypushover'):
        autosub.NOTIFYPUSHOVER = cfg.getboolean(section, 'notifypushover')
    if cfg.has_option(section, 'pushoverappkey'):
        autosub.PUSHOVERAPPKEY = cfg.get(section, 'pushoverappkey')
    if cfg.has_option(section, 'pushoveruserkey'):
        autosub.PUSHOVERUSERKEY = cfg.get(section, 'pushoveruserkey')
    if cfg.has_option(section, 'notifyboxcar2'):
        autosub.NOTIFYBOXCAR2 = cfg.getboolean(section, 'notifyboxcar2')
    if cfg.has_option(section, 'boxcar2token'):
        autosub.BOXCAR2TOKEN = cfg.get(section, 'boxcar2token')
    if cfg.has_option(section, 'notifyplex'):
        autosub.NOTIFYPLEX = cfg.getboolean(section, 'notifyplex')
    if cfg.has_option(section, 'plexserverhost'):
        autosub.PLEXSERVERHOST = cfg.get(section, 'plexserverhost')
    if cfg.has_option(section, 'plexserverport'):
        autosub.PLEXSERVERPORT = cfg.get(section, 'plexserverport')

    # Settings

    autosub.NAMEMAPPING = {
        "Against the Wall": "1836237",
        "alcatraz": "1728102",
        "almost human": "2654580",
        "alphas": "1183865",
        "american dad": "0397306",
        "american horror story": "1844624",
        "appropriate adult": "1831575",
        "Are You There Chelsea": "1826989",
        "atlantis": "2705602",
        "atlantis 2013": "2705602",
        "awkward": "1663676",
        "back in the game": "2655470",
        "Bates Motel": "2188671",
        "beauty and the beast": "2193041",
        "beauty and the beast 2012": "2193041",
        "betrayal": "2751074",
        "blue bloods": "1595859",
        "boardwalk empire": "0979432",
        "bob's burgers": "1561755",
        "bobs burgers": "1561755",
        "Body of Proof": "1587669",
        "borgen": "1526318",
        "breakout kings": "1590961",
        "breaking bad": "903747",
        "Castle (2009)": "1219024",
        "castle 2009": "1219024",
        "charlie's angels 2011": "1760943",
        "Charlies Angels 2011": "1760943",
        "chicago fire": "2261391",
        "chicago fire (2012)": "2261391",
        "chicago pd": "2805096",
        "chicago p.d": "2805096",
        "chicago p.d.": "2805096",
        "Common Law 2012": "1771072",
        "continuum": "1954347",
        "covert affairs": "1495708",
        "cracked (2013)": "2078576",
        "criminal minds": "0452046",
        "csi": "0247082",
        "csi crime scene investigation": "0247082",
        "Csi Miami": "0313043",
        "csi new york": "0395843",
        "csi ny": "0395843",
        "Da Vinci's Demons": "2094262",
        "Dallas 2012": "1723760",
        "desperate housewives": "0410975",
        "devious maids": "2226342",
        "Doctor Who": "0436992",
        "Doctor Who (2005)": "0436992",
        "don't trust the b---- in apartment 23": "1819509",
        "dont trust the bitch in apartment 23": "1819509",
        "dracula": "2296682",
        "dracula (2013)": "2296682",
        "DreamWorks Dragons: Riders of Berk": "2325846",
        "eastbound & down": "0866442",
        "eastbound and down": "0866442",
        "emily owens m d": "2290339",
        "Falling skies": "1462059",
        "Fast N Loud": "2346169",
        "Femme Fatales": "1841108",
        "Franklin and Bash": "1600199",
        "Free Agents": "1839481",
        "Free Agents Us": "1839481",
        "fringe": "1119644",
        "game of thrones": "0944947",
        "Glee": "1327801",
        "Grey's Anatomy": "0413573",
        "Greys Anatomy": "0413573",
        "grimm": "1830617",
        "harry's law": "1582453",
        "Harrys Law": "1582453",
        "haven": "1519931",
        "Hawaii Five 0": "1600194",
        "Hawaii Five 0 2010": "1600194",
        "Hawaii Five-0": "1600194",
        "hawaii five-0 2010": "1600194",
        "hello ladies": "2378794",
        "homeland": "1796960",
        "hostages": "2647258",
        "house of cards 2013": "1856010",
        "how i met your mother": "0460649",
        "How To Survive The End Of The World": "3377330",
        "Intelligence us": "2693776",
        "king": "1804880",
        "kings of crash": "2623754",
        "Last Man Standing": "1828327",
        "Last Man Standing Us": "1828327",
        "law and order svu": "0203259",
        "law and order uk": "1166893",
        "longmire": "1836037",
        "luck": "1578887",
        "luther": "1474684",
        "Man Up": "1828238",
        "marvel's agents of s h i e l d": "2364582",
        "marvels agents of s h i e l d": "2364582",
        "marvel agents of shield": "2364582",
        "agents of s h i e l d": "2364582",
        "masters of sex": "2137109",
        "Melissa And Joey": "1597420",
        "Merlin": "1199099",
        "Merlin 2008": "1199099",
        "Mike and Molly": "1608180",
        "missing 2012": "1828246",
        "mockingbird lane": "2130271",
        "modern family": "1442437",
        "moonshiners": "1877005",
        "Mr Sunshine": "1583638",
        "nashville": "2281375",
        "nashville 2012": "2281375",
        "ncis": "0364845",
        "Ncis Los Angeles": "1378167",
        "Necessary Roughness": "1657505",
        "new girl": "1826940",
        "new tricks": "0362357",
        "nip tuck": "0361217",
        "nip-tuck": "0361217",
        "once upon a time": "1843230",
        "once upon time": "1843230",
        "once upon a time 2011": "1843230",
        "once upon a time in wonderland": "2802008",
        "oppenheimer (1980)": "0078037",
        "Parks and Recreation": "1266020",
        "person of interest": "1839578",
        "played": "2886812",
        "pretty little liars": "1578873",
        "Prime Suspect Us": "1582456",
        "primeval new world": "2295953",
        "ray donovan": "2249007",
        "reign 2013": "2710394",
        "Revolution": "2070791",
        "Revolution 2012": "2070791",
        "Rizzoli And Isles": "1551632",
        "rookie blue": "1442065",
        "Scandal": "1837576",
        "scandal (2012)": "1837576",
        "Scandal 2012": "1837576",
        "Scandal US": "1837576",
        "scott and bailey": "1843678",
        "sean saves the world": "2715776",
        "Shameless Us": "1586680",
        "silent witness": "0115355",
        "Sinbad": "1979918",
        "sleepy hollow": "2647544",
        "snooki and jwoww": "2083701",
        "sons of anarchy": "1124373",
        "South Park": "0121955",
        "Spartacus": "1442449",
        "Spartacus Blood And Sand": "1442449",
        "Spartacus Gods Of The Arena": "1758429",
        "spartacus vengeance": "1442449",
        "star wars the clone wars": "0458290",
        "suburgatory": "1741256",
        "suits": "1632701",
        "sun, sex and suspicious parents": "1832153",
        "super fun night": "2298477",
        "The After": "3145422",
        "the americans 2013": "2149175",
        "the americans (2013)": "2149175",
        "the americans": "2149175",
        "the big bang theory": "898266",
        "the biggest loser": "0429318",
        "the blacklist": "2741602",
        "the client list": "2022170",
        "the closer": "0458253",
        "the dukes of hazzard": "78607",
        "the gadget show": "0830851",
        "The Kennedys": "1567215",
        "the killing (2011)": "1637727",
        "The La Complex": "1794147",
        "The Legend Of Korra": "1695360",
        "the lying game": "1798274",
        "the mentalist": "1196946",
        "the newsroom (2012)": "1870479",
        "the newsroom 2012": "1870479",
        "the o c": "0362359",
        "the office us": "0386676",
        "the originals": "2632424",
        "the piglet files": "0098895",
        "the protector": "1836417",
        "The River": "1836195",
        "the tomorrow people us": "2660734",
        "the walking dead": "1520211",
        "the wire": "306414",
        "the wrong mans": "2603596",
        "thundercats 2011": "1666278",
        "Touch": "1821681",
        "trophy wife": "2400736",
        "two and a half men": "0369179",
        "under the dome": "1553656",
        "unforgettable": "1842530",
        "untouchables-the venture bros": "0417373",
        "Up All Night 2011": "1843323",
        "utopia": "2384811",
        "Vegas": "2262383",
        "white collar": "1358522",
        "xiii the series 2011": "1713938"
    }

    autosub.NAMEMAPPINGUPPER = {}
    for x in autosub.NAMEMAPPING.keys():
        autosub.NAMEMAPPINGUPPER[x.upper()] = autosub.NAMEMAPPING[x]
    autosub.LASTESTDOWNLOAD = []
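
The long runs of has_option/get pairs in ReadConfig all follow one pattern; a hedged sketch of a small helper that could fold each pair into a single line (the helper name is illustrative, not part of AutoSub):

def _load(cfg, section, option, default, getter='get'):
    # Return the option via cfg.get/getboolean/getint, or the default when absent.
    if cfg.has_option(section, option):
        return getattr(cfg, getter)(section, option)
    return default

# e.g. autosub.WANTEDFIRST = _load(cfg, 'config', 'wantedfirst', autosub.WANTEDFIRST, 'getboolean')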
Example #7
from optparse import OptionParser
from ConfigParser import SafeConfigParser


def isItCorrelated(name):
    print '\t ----> isItCorrelated: testing ', name
    if ('_eff_b' in name or '_les' in name or '_pu' in name or '_umet' in name
            or '_res_j' in name or '_scale_j' in name):
        print '-> true'
        return True
    else:
        print '-> false'
        return False


parser = OptionParser(
    description="%prog : A RooStats Implementation of Anomalous Triple Gauge Coupling Analysis.",
    usage="buildWZworkspace --config=example_config.cfg")
cfgparse = SafeConfigParser()

parser.add_option("--config",
                  dest="config",
                  help="The name of the input configuration file.")
(options, args) = parser.parse_args()

miss_options = False

if options.config is None:
    print 'Need to specify --config'
    miss_options = True

if miss_options:
    exit(1)
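
The manual miss_options bookkeeping above can be avoided entirely with the newer stdlib parser; a minimal argparse sketch of the same required --config flag (an alternative, not what this example uses):

import argparse

parser = argparse.ArgumentParser(
    description="A RooStats Implementation of Anomalous Triple Gauge Coupling Analysis.")
parser.add_argument("--config", dest="config", required=True,
                    help="The name of the input configuration file.")
options = parser.parse_args()  # exits with an error message if --config is missing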
Example #8
from ConfigParser import SafeConfigParser
from ftplib import FTP
# logger, myBot, SAB, SAN, outputSubs, consolelogLevel and HTTPError are
# assumed to be defined or imported elsewhere in the bot's module.


def main():
    logger.logInfo("Welcome to the Reddit SubredditAnalytics Bot.")

    # Get the user information from the configuration file
    config = SafeConfigParser()
    config.read('config.ini')

    SAB.login(myBot, config.get('reddit', 'username'),
              config.get('reddit', 'password'),
              config.get('reddit', 'user_agent'))
    myBot.post_to = config.get('reddit', 'subreddit')

    while True:
        # Load the subreddits to process
        subredditsProcessed = open('subsProcessed.txt').read().splitlines()
        subredditsToProcess = open('subsToProcess.txt').read().splitlines()

        while len(subredditsToProcess) > 0:
            subreddit = subredditsToProcess.pop(0)

            if subreddit not in subredditsProcessed:
                logger.logInfo('Processing {0}'.format(subreddit))

                # Create network object that will hold the nodes and edges
                myNet = SAN.SubredditAnalyticsNet()
                myNet.parentSub = subreddit
                myNet.logger.setLogLevel(consolelogLevel)

                # Add the processed subreddit to the bot's ban list
                myBot.subBanList.append(subreddit)

                try:
                    # Retrieve users, randomize, and take a sample for further processing
                    userList = myBot.getUsers(subreddit)

                    subList = list()
                    # Get each user's subscriptions and add them to the network
                    for index, user in enumerate(userList):
                        subList = myBot.getSubs(user)
                        logger.logInfo(
                            'Processed user {0} of {1} - {2} - {3} unique subs'
                            .format(index + 1, len(userList), user,
                                    len(subList)))
                        if subList:
                            myNet.add_users_node(user, subList)

                    # Save the JSON network file
                    myNet.processNetforSave(outputSubs, myBot)
                    myNet.saveDATAfile(len(userList))

                    logger.logInfo('Uploading via FTP')
                    try:
                        session = FTP(config.get('ftp', 'server'),
                                      config.get('ftp', 'username'),
                                      config.get('ftp', 'password'))
                        session.set_pasv(True)
                        session.cwd('JSON')
                        source = open('html/JSON/' + subreddit + '.json', 'rb')
                        session.storbinary('STOR ' + subreddit + '.json',
                                           source)
                        source.close()
                        session.quit()
                    except Exception, e:
                        logger.logError('FTP:' + str(e))

                    logger.logInfo('Submitting to Reddit')
                    try:
                        URL = "http://redditanalytics.altervista.org/show.php?subreddit={0}".format(
                            subreddit)
                        newsubmission = submit_post(myBot, subreddit, URL)
                    except Exception, e:
                        logger.logError('Reddit Submission: ' + str(e))

                    # Add top 5 subreddits to analyze to the queue
                    topSubreddits = myNet.subredditsSortedByUsers()[:5]
                    for (sub, n) in topSubreddits:
                        if sub not in subredditsToProcess:
                            if sub not in subredditsProcessed:
                                subredditsToProcess.append(sub)

                    # Remove the processed subreddit from the ban list and append to processed
                    myBot.subBanList.remove(subreddit)
                    subredditsProcessed.append(subreddit)

                except HTTPError, e:
                    if e.response.status_code == 403:
                        logger.logCrit(
                            'Critical error! {0} - Subreddit not found'.format(
                                e))
                        myBot.subBanList.remove(subreddit)
                        subredditsProcessed.append(subreddit)
                    else:
                        logger.logCrit('Critical error! {0}'.format(e))
                        myBot.subBanList.remove(subreddit)
                        subredditsToProcess.append(subreddit)
                del myNet
Example #9
 def _readConfigFile(self, configFile):
     configParser = SafeConfigParser()
     configParser.read(configFile)
     return configParser
Example #10
from xml.dom import minidom
from httpcomm import HTTPComm
from ConfigParser import SafeConfigParser
from random import randrange
import os
import sys
import Queue
import threading
import xbmcaddon

# Import JSON - compatible with Python<v2.6
try:
    import json
except ImportError:
    import simplejson as json

# Config parser
pluginConfig = SafeConfigParser()
pluginConfig.read(os.path.join(os.path.dirname(__file__), "config.ini"))

# Various constants used throughout the script
HANDLE = int(sys.argv[1])
ADDON = xbmcaddon.Addon(id=pluginConfig.get('plugin', 'id'))

# Plugin constants
__plugin__ = ADDON.getAddonInfo('name')
__author__ = "Tim C. Steinmetz"
__url__ = "http://qualisoft.dk/"
__platform__ = "xbmc media center, [LINUX, OS X, WIN32]"
__date__ = pluginConfig.get('plugin', 'date')
__version__ = ADDON.getAddonInfo('version')
"""
 Thread class used for scraping individual playlists for each channel
Example #11
    def __init__(self, path):

        self.parser = SafeConfigParser()
        self.path = path
Example #12
def main():

	args = parseArgument()

	print
	print("## AndroBugs Framework: Android APK Vulnerability Scanner - Massive Tool ##")
	print

	ANALYZE_MODE_MASSIVE = "massive"

	if args.ignore_duplicated_scanning :

		from pymongo import MongoClient
		from ConfigParser import SafeConfigParser

		if platform.system().lower() == "windows" :
			import sys
			db_config_file = os.path.join(os.path.dirname(sys.executable), 'androbugs-db.cfg')
		else :
			db_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'androbugs-db.cfg')

		if not os.path.isfile(db_config_file) :
			print("[ERROR] AndroBugs Framework DB config file not found: " + db_config_file)
			sys.exit(1)

		configParser = SafeConfigParser()
		configParser.read(db_config_file)

		MongoDB_Hostname = configParser.get('DB_Config', 'MongoDB_Hostname')
		MongoDB_Port = configParser.getint('DB_Config', 'MongoDB_Port')
		MongoDB_Database = configParser.get('DB_Config', 'MongoDB_Database')

		Collection_Analyze_Result = configParser.get('DB_Collections', 'Collection_Analyze_Result')

		client = MongoClient(MongoDB_Hostname, MongoDB_Port)
		db = client[MongoDB_Database]	# Name is case-sensitive
		collection_AppInfo = db[Collection_Analyze_Result]		# Name is case-sensitive

		print("[Notice] APK with the same \"package_name\", \"analyze_engine_build\" and \"analyze_tag\" will not be analyzed again.")
		print

	input_dir = os.path.realpath(args.input_apk_dir)
	output_dir = os.path.realpath(args.report_output_dir)

	if (not os.path.isdir(input_dir)) :
		print("APK input directory does not exist.")
		sys.exit()

	dir_names = os.listdir(input_dir)
	total_dir = len(dir_names)
	current_file = 0

	for filename in dir_names:
		if filename.endswith(".apk") :
			current_file = current_file + 1

			package_name = filename[:-4]

			print("Analyzing APK(" + str(current_file) + "/" + str(total_dir) + "): " + filename)

			if args.ignore_duplicated_scanning :  #check if already scanned

				query_condition = { "analyze_mode" : ANALYZE_MODE_MASSIVE, 
									"package_name": package_name, 
									"analyze_engine_build": args.analyze_engine_build,
									"analyze_tag": args.analyze_tag }

				boolHasResult = False

				query_result = collection_AppInfo.find(query_condition)
				
				for result in query_result :
					boolHasResult = True
					break

				if boolHasResult :
					print(" ->Package name [" + package_name + "] has already in DB. Ignore analyzing it.")
					continue
			
			try:

				if platform.system().lower() == "windows" :
					main_cmd = "androbugs.exe"
				else :
					main_cmd = "python androbugs.py"

				cmd = main_cmd + " -s -v -e " + str(args.extra) + " -f " + os.path.join(input_dir, filename) + " -o " + output_dir + " -m " + ANALYZE_MODE_MASSIVE + " -b " + str(args.analyze_engine_build) + " -t " + str(args.analyze_tag)
				#print(cmd)
				process = os.popen(cmd)
				preprocessed = process.read()
				process.close()

			except KeyboardInterrupt :
				print("Stopped.")
				break
			except Exception as err:
				print(err)
				pass
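
The scanner command above is assembled by string concatenation and run through os.popen; a minimal sketch of the same invocation with subprocess (the argument list mirrors the flags shown; anything beyond that is an assumption):

import subprocess

# Hypothetical equivalent of the os.popen() call above; a list of arguments
# avoids shell quoting problems with paths that contain spaces.
cmd_args = ["python", "androbugs.py", "-s", "-v",
            "-e", str(args.extra),
            "-f", os.path.join(input_dir, filename),
            "-o", output_dir,
            "-m", ANALYZE_MODE_MASSIVE,
            "-b", str(args.analyze_engine_build),
            "-t", str(args.analyze_tag)]
output = subprocess.check_output(cmd_args)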
Example #13
class ExecuteTestSuite:

    parser = SafeConfigParser()
    parser.read('Config.ini')
    executeMode = parser.get('Config', 'TYPE_OF_EXECUTION')
    executeMode = executeMode.lower()
    testdatafiletype = parser.get('Config', 'TestDateFileType')
    testdatafiletype = testdatafiletype.lower()
    Host_URL = parser.get('Config', 'Host_URL')
    Host_URL = Host_URL.lower()
    # Reading Test_Suite.xls file
    xl_workbook = xlrd.open_workbook('Test_Suite.xls')
    workbooksheet_obj = xl_workbook.sheet_by_name('Tests')
    requestRowCount = int(workbooksheet_obj.nrows)
    requestColumnCount = int(workbooksheet_obj.ncols)
    obj = TestExecutionMode()
    testCasesToExecuteList = []
    if testdatafiletype == "xls":
        testCasesToExecuteList = obj.getExecutionType(executeMode,
                                                      workbooksheet_obj,
                                                      requestRowCount)
        print testCasesToExecuteList
    if testdatafiletype == "json":
        with open('TestSuite_new.json') as json_file:
            filedata = json_file.read()
        json_testdata = json.loads(filedata)
        count = 0
        for i in range(0, json_testdata.__len__()):
            if json_testdata[i]['EXECUTE'].lower() == "yes":
                count = count + 1
                testCasesToExecuteList.append(count)
        print testCasesToExecuteList

    def make_method(self, tcId, result):
        def test_input(self):
            if result:
                pass
            else:
                self.fail()

    #         func(self, tcId)

        if self.testdatafiletype == "xls":
            test_input.__name__ = 'test_{str_tc_description}'.format(
                str_tc_description=self.getValues('TC_DESCRIPTION', tcId))
        if self.testdatafiletype == "json":
            test_input.__name__ = 'test_{str_tc_description}'.format(
                str_tc_description=tcId)
        return test_input

    def json(self):
        def decorator(klass):
            for i in range(0, self.json_testdata.__len__()):
                if self.json_testdata[i]['EXECUTE'].lower() == "yes":
                    print "******" * 40
                    print "Testcase Descrition --> : ", self.json_testdata[
                        i].get("TC_DESCRIPTION")
                    self.str_tc_description = str(
                        self.json_testdata[i].get("TC_DESCRIPTION"))
                    print "Testcase ID --> : " + self.json_testdata[i].get(
                        "TC_ID")
                    self.str_tc_id = str(self.json_testdata[i].get("TC_ID"))
                    self.str_tc_methodname = str(
                        self.json_testdata[i].get("Method_Name"))
                    print "Method Name : ", self.str_tc_methodname
                    self.str_tc_requestbody = self.json_testdata[i].get(
                        "Reguest_Body")
                    print "Request Body : ", self.str_tc_requestbody
                    self.str_tc_requestheaders = self.json_testdata[i].get(
                        "Request_Headers")
                    print "Request Headers : ", self.str_tc_requestheaders
                    self.str_tc_endpointurl = str(
                        self.json_testdata[i].get("EndPoint_URL"))
                    print "End Point URL : ", self.str_tc_endpointurl
                    self.str_tc_expectedstatuscode = str(
                        self.json_testdata[i].get("Expected_Status_Code"))
                    print "Expected Status Code : ", self.str_tc_expectedstatuscode
                    self.str_tc_expectedelementpath = str(
                        self.json_testdata[i].get("Expected_ElementPath"))
                    print "Expected Element Path in JSon Response : ", self.str_tc_expectedelementpath
                    self.str_tc_expectedvalue = str(
                        self.json_testdata[i].get("Expected_Value"))
                    print "Expected Value : ", self.str_tc_expectedvalue
                    print "##" * 40
                    result = self.restMethodCall(
                        self.str_tc_description, self.str_tc_methodname,
                        self.str_tc_requestbody, self.str_tc_requestheaders,
                        self.str_tc_endpointurl,
                        self.str_tc_expectedstatuscode,
                        self.str_tc_expectedelementpath,
                        self.str_tc_expectedvalue)
                    test_input = self.make_method(self.str_tc_description,
                                                  result)
                    setattr(klass, test_input.__name__, test_input)
            return klass

        return decorator

    def xls(self, testCasesToExecuteList):

        if len(testCasesToExecuteList) > 0:
            print "testCasesToExecuteList.size :", len(testCasesToExecuteList)

            def decorator(klass):
                for tcId in testCasesToExecuteList:
                    self.str_tc_description = str(
                        self.getValues('TC_DESCRIPTION', tcId))
                    print self.str_tc_description
                    self.str_tc_methodname = str(
                        self.getValues('Method Name', tcId))
                    print self.str_tc_methodname
                    if (self.getValues('Reguest Body',
                                       tcId) == None) or (self.getValues(
                                           'Reguest Body', tcId) == ""):
                        self.str_tc_requestbody = str(
                            self.getValues('Reguest Body', tcId))
                    else:
                        self.str_tc_requestbody = eval(
                            json.loads(
                                json.dumps(self.getValues(
                                    'Reguest Body', tcId))))
                    print self.str_tc_requestbody
                    self.str_tc_requestheaders = str(
                        self.getValues('Request Headers', tcId))
                    print self.str_tc_requestheaders
                    self.str_tc_endpointurl = str(
                        self.getValues('End Point URL', tcId))
                    print self.str_tc_endpointurl
                    self.str_tc_expectedstatuscode = str(
                        self.getValues('Expected Status Code', tcId))
                    print self.str_tc_expectedstatuscode
                    self.str_tc_expectedelementpath = str(
                        self.getValues('Expected Element Path & Name', tcId))
                    print self.str_tc_expectedelementpath
                    self.str_tc_expectedvalue = str(
                        self.getValues('Expected Value', tcId))
                    print self.str_tc_expectedvalue
                    result = self.restMethodCall(
                        self.str_tc_description, self.str_tc_methodname,
                        self.str_tc_requestbody, self.str_tc_requestheaders,
                        self.str_tc_endpointurl,
                        self.str_tc_expectedstatuscode,
                        self.str_tc_expectedelementpath,
                        self.str_tc_expectedvalue)
                    test_input = self.make_method(tcId, result)
                    setattr(klass, test_input.__name__, test_input)
                return klass

            return decorator

#     def json_old(self):
#
#         with open('TestSuite.json') as json_file:
#             data = json_file.read()
#         data1 = json.loads(data)
#         for i in range(0, data1.__len__()):
#             if data1[i]['execute'].lower() == "yes" :
#                 for j in range(0, data1[i]["Testcase_Testdata"].__len__()):
#                     testdata = data1[i]["Testcase_Testdata"][j]
#                     print ""*40
#                     print "******"*40
#                     print ""*40
#                     print "Testcase Descrition --> : " , testdata.get("TC_DESCRIPTION")
#                     self.str_tc_description = str(testdata.get("TC_DESCRIPTION"))
#                     print "Testcase ID --> : " + testdata.get("TC_ID")
#                     self.str_tc_id = str(testdata.get("TC_ID"))
#                     self.str_tc_methodname = str(testdata.get("Method_Name"))
#                     print "Method Name : " , self.str_tc_methodname
#                     self.str_tc_requestbody = str(testdata.get("Reguest_Body"))
#                     print  "Request Body : " , self.str_tc_requestbody
#                     self.str_tc_requestheaders = testdata.get("Request_Headers")
#                     print  "Request Headers : " , self.str_tc_requestheaders
#                     self.str_tc_endpointurl = str(testdata.get("EndPoint_URL"))
#                     print  "End Point URL : " , self.str_tc_endpointurl
#                     self.str_tc_expectedstatuscode =  str(testdata.get("Expected_Status_Code"))
#                     print  "Expected Status Code : " , self.str_tc_expectedstatuscode
#                     self.str_tc_expectedelementpath =  str(testdata.get("Expected_ElementPath"))
#                     print  "Expected Element Path in JSon Response : " , self.str_tc_expectedelementpath
#                     self.str_tc_expectedvalue =  str(testdata.get("Expected_Value"))
#                     print  "Expected Value : " , self.str_tc_expectedvalue
#                     self.restMethodCall(self.str_tc_description, self.str_tc_methodname, self.str_tc_requestbody, self.str_tc_requestheaders, self.str_tc_endpointurl, self.str_tc_expectedstatuscode, self.str_tc_expectedelementpath, self.str_tc_expectedvalue)

    def xml(self, worksheetObj, requestRowCount):

        pass

    switcher = {"xls": xls, "json": json, "xml": xml}

    def run_setExecutionType(self, testCasesToExecuteList):
        if self.testdatafiletype == "json":
            func = self.switcher.get(self.testdatafiletype)
            return func(self)
        if self.testdatafiletype == "xls":
            func = self.switcher.get(self.testdatafiletype)
            return func(self, testCasesToExecuteList)

    def getValues(self, value, tcId):

        for row in range(self.workbooksheet_obj.nrows):
            for col in range(self.workbooksheet_obj.ncols):
                if self.workbooksheet_obj.cell_value(row, col) == value:
                    col1 = col
                if self.workbooksheet_obj.cell_value(row, col) == tcId:
                    row1 = row
        return self.workbooksheet_obj.cell(row1, col1).value

    def restMethodCall(self, str_tc_description, str_tc_methodname,
                       str_tc_requestbody, str_tc_requestheaders,
                       str_tc_endpointurl, str_tc_expectedstatuscode,
                       str_tc_expectedelementpath, str_tc_expectedvalue):

        rest = Rest()
        if str_tc_methodname.upper() == "GET":
            getrequestresult = rest.getRequest(self.Host_URL,
                                               str_tc_endpointurl,
                                               str_tc_requestheaders,
                                               str_tc_expectedstatuscode)
            if str(getrequestresult) == str_tc_expectedvalue:
                print "GET Request Test Case Passed"
                print "Actual Response   : ", getrequestresult
                print "Expected Response : ", str_tc_expectedvalue
                return True
            else:
                return False
        if str_tc_methodname.upper() == "POST":
            postrequestresult = rest.postRequest(self.Host_URL,
                                                 str_tc_endpointurl,
                                                 str_tc_requestbody,
                                                 str_tc_requestheaders,
                                                 str_tc_expectedstatuscode)
            if str(postrequestresult) == str_tc_expectedvalue:
                print "POST Request Test Case Passed"
                print "Actual Response   : ", postrequestresult
                print "Expected Response : ", str_tc_expectedvalue
                return True
            else:
                return False
        if str_tc_methodname.upper() == "PUT":
            putrequestresult = rest.putRequest(self.Host_URL,
                                               str_tc_endpointurl,
                                               str_tc_requestbody,
                                               str_tc_requestheaders,
                                               str_tc_expectedstatuscode)
            if str(putrequestresult) == str_tc_expectedvalue:
                print "PUT Request Test Case Passed"
                print "Actual Response   : ", putrequestresult
                print "Expected Response : ", str_tc_expectedvalue
                return True
            else:
                return False
        if str_tc_methodname.upper() == "DELETE":
            deleterequestresult = rest.deleteRequest(
                self.Host_URL, str_tc_endpointurl, str_tc_requestbody,
                str_tc_requestheaders, str_tc_expectedstatuscode)
            if str(deleterequestresult) == str_tc_expectedvalue:
                print "DELETE Request Test Case Passed"
                print "Actual Response   : ", deleterequestresult
                print "Expected Response : ", str_tc_expectedvalue
                return True
            else:
                return False
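
For reference, a hypothetical invocation of restMethodCall() (all argument values below are invented; `runner` stands for an instance of the class above, and Host_URL and the Rest helper come from the surrounding framework):

runner.restMethodCall("Fetch a user", "GET", "",
                      {"Accept": "application/json"},
                      "/api/users/1", "200",
                      "user.id", "1")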
Example #14
def parse_config():
    config = SafeConfigParser()
    config.read(config_file)
    return config
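
A minimal usage sketch, assuming the module-level config_file points at an INI file that has a [common] section with a workspace option (the section and option names are assumptions):

config = parse_config()
if config.has_section('common'):
    print config.get('common', 'workspace')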
Example #15
    def main():
        # setup logging
        ha_syslog = logging.handlers.SysLogHandler('/dev/log')
        ha_syslog.setFormatter(
            logging.Formatter('%(name)s[%(process)d]: %(message)s'))
        logging.root.addHandler(ha_syslog)

        # leave log for backwards compatibility
        ha_file = logging.FileHandler(LOG_PATH)
        ha_file.setFormatter(
            logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
        logging.root.addHandler(ha_file)

        log = logging.getLogger('qmemman.daemon')

        usage = "usage: %prog [options]"
        parser = OptionParser(usage)
        parser.add_option("-c", "--config", action="store", dest="config", default=config_path)
        (options, args) = parser.parse_args()

        # close io
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()

        config = SafeConfigParser({
                'vm-min-mem': str(qmemman_algo.MIN_PREFMEM),
                'dom0-mem-boost': str(qmemman_algo.DOM0_MEM_BOOST),
                'cache-margin-factor': str(qmemman_algo.CACHE_FACTOR)
                })
        config.read(options.config)
        if config.has_section('global'):
            qmemman_algo.MIN_PREFMEM = parse_size(config.get('global', 'vm-min-mem'))
            qmemman_algo.DOM0_MEM_BOOST = parse_size(config.get('global', 'dom0-mem-boost'))
            qmemman_algo.CACHE_FACTOR = config.getfloat('global', 'cache-margin-factor')

        log.info('MIN_PREFMEM={qmemman_algo.MIN_PREFMEM}'
            ' DOM0_MEM_BOOST={qmemman_algo.DOM0_MEM_BOOST}'
            ' CACHE_FACTOR={qmemman_algo.CACHE_FACTOR}'.format(
                qmemman_algo=qmemman_algo))

        try:
            os.unlink(SOCK_PATH)
        except OSError:
            # the socket file may not exist yet
            pass

        log.debug('instantiating server')
        os.umask(0)
        server = SocketServer.UnixStreamServer(SOCK_PATH, QMemmanReqHandler)
        os.umask(077)

        # notify systemd
        notify_socket = os.getenv('NOTIFY_SOCKET')
        if notify_socket:
            log.debug('notifying systemd')
            s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
            if notify_socket.startswith('@'):
                notify_socket = '\0%s' % notify_socket[1:]
            s.connect(notify_socket)
            s.sendall("READY=1")
            s.close()

        thread.start_new_thread(start_server, tuple([server]))
        XS_Watcher().watch_loop()
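
The snippet calls a parse_size() helper defined elsewhere in qmemman; a rough sketch of what such a helper could look like (an assumption, not the original implementation):

def parse_size(value):
    # hypothetical: accept a plain byte count or a k/m/g suffix
    units = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}
    value = value.strip().lower()
    if value and value[-1] in units:
        return int(value[:-1]) * units[value[-1]]
    return int(value)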
Example #16
def plugin_quickstart(parser, options, args=None):
    """A command-line script (plugin quickstart) points to this.  It generates a
    directory structure for an openmdao plugin package along with Sphinx docs.

    usage: plugin quickstart <dist_name> [-v <version>] [-d <dest_dir>] [-g <plugin_group>] [-c class_name]

    """
    if args:
        print_sub_help(parser, 'quickstart')
        return -1

    name = options.dist_name
    if options.classname:
        classname = options.classname
    else:
        classname = "%s%s" % ((name.upper())[0], name[1:])
    version = options.version

    options.dest = os.path.abspath(
        os.path.expandvars(os.path.expanduser(options.dest)))
    if not options.group.startswith('openmdao.'):
        options.group = 'openmdao.' + options.group

    templates, class_templates, test_template = _load_templates()

    startdir = os.getcwd()
    try:
        os.chdir(options.dest)

        if os.path.exists(name):
            raise OSError("Can't create directory '%s' because it already"
                          " exists." % os.path.join(options.dest, name))

        cfg = SafeConfigParser(dict_type=OrderedDict)
        stream = StringIO.StringIO(templates['setup.cfg'] % {
            'name': name,
            'version': version
        })
        cfg.readfp(stream, 'setup.cfg')
        cfgcontents = StringIO.StringIO()
        cfg.write(cfgcontents)

        template_options = \
            _get_template_options(os.path.join(options.dest, name),
                                  cfg, classname=classname)

        template_options['srcmod'] = name

        dirstruct = {
            name: {
                'setup.py': templates['setup.py'] % template_options,
                'setup.cfg': cfgcontents.getvalue(),
                'MANIFEST.in': templates['MANIFEST.in'] % template_options,
                'README.txt': templates['README.txt'] % template_options,
                'src': {
                    name: {
                        '__init__.py':
                        '',  # 'from %s import %s\n' % (name,classname),
                        '%s.py' % name:
                        class_templates[options.group] % template_options,
                        'test': {
                            'test_%s.py' % name:
                            test_template % template_options,
                            '__init__.py': """ """
                        },
                    },
                },
                'docs': {
                    'conf.py': templates['conf.py'] % template_options,
                    'index.rst': templates['index.rst'] % template_options,
                    'srcdocs.rst': _get_srcdocs(options.dest, name),
                    'pkgdocs.rst': _get_pkgdocs(cfg),
                    'usage.rst': templates['usage.rst'] % template_options,
                    '_static': {},
                },
            },
        }

        build_directory(dirstruct)

    finally:
        os.chdir(startdir)

    return 0
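
Note how the quickstart feeds template text to SafeConfigParser through StringIO instead of a file; the same round-trip in isolation (a minimal self-contained sketch):

from ConfigParser import SafeConfigParser
import StringIO

cfg = SafeConfigParser()
cfg.readfp(StringIO.StringIO("[metadata]\nname = demo\nversion = 0.1\n"), 'setup.cfg')
out = StringIO.StringIO()
cfg.write(out)
print out.getvalue()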
Example #17
def configuration(file='config.ini', encoding='utf-8'):
    parser = SafeConfigParser()
    with codecs.open(file, 'r', encoding=encoding) as f:
        parser.readfp(f)
    return parser
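
A usage sketch, assuming a UTF-8 encoded config.ini exists in the working directory:

cfg = configuration('config.ini')
print cfg.sections()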
Example #18
def plugin_makedist(parser, options, args=None, capture=None, srcdir='src'):
    """A command-line script (plugin makedist) points to this.  It creates a
    source distribution containing Sphinx documentation for the specified
    distribution directory.  If no directory is specified, the current directory
    is assumed.

    usage: plugin makedist [dist_dir_path]

    """
    if args:
        print_sub_help(parser, 'makedist')
        return -1

    dist_dir = os.path.abspath(
        os.path.expandvars(os.path.expanduser(options.dist_dir_path)))
    _verify_dist_dir(dist_dir)

    startdir = os.getcwd()
    os.chdir(dist_dir)

    templates, class_templates, test_template = _load_templates()

    try:
        plugin_build_docs(parser, options)

        cfg = SafeConfigParser(dict_type=OrderedDict)
        cfg.readfp(open('setup.cfg', 'r'), 'setup.cfg')

        print "collecting entry point information..."
        cfg.set('metadata', 'entry_points', _get_entry_points(srcdir))

        template_options = _get_template_options(
            options.dist_dir_path, cfg, packages=find_packages(srcdir))

        dirstruct = {
            'setup.py': templates['setup.py'] % template_options,
        }

        name = cfg.get('metadata', 'name')
        version = cfg.get('metadata', 'version')

        if sys.platform == 'win32':  # pragma no cover
            disttar = "%s-%s.zip" % (name, version)
        else:
            disttar = "%s-%s.tar.gz" % (name, version)
        disttarpath = os.path.join(startdir, disttar)
        if os.path.exists(disttarpath):
            print "Removing existing distribution %s" % disttar
            os.remove(disttarpath)

        build_directory(dirstruct, force=True)

        cmdargs = [sys.executable, 'setup.py', 'sdist', '-d', startdir]
        if capture:
            stdout = open(capture, 'w')
            stderr = STDOUT
        else:  # pragma no cover
            stdout = None
            stderr = None
        try:
            retcode = call(cmdargs, stdout=stdout, stderr=stderr)
        finally:
            if stdout is not None:
                stdout.close()
        if retcode:
            cmd = ' '.join(cmdargs)
            sys.stderr.write(
                "\nERROR: command '%s' returned error code: %s\n" %
                (cmd, retcode))
            return retcode
    finally:
        os.chdir(startdir)

    if os.path.exists(disttar):
        print "Created distribution %s" % disttar
        return 0
    else:
        sys.stderr.write("\nERROR: failed to make distribution %s" % disttar)
        return -1
Example #19
def WriteConfig():
    cfg = SafeConfigParser()
    cfg.optionxform = str

    section = 'config'
    cfg.add_section(section)
    cfg.set(section, "seriespath", autosub.SERIESPATH)
    cfg.set(section, "logfile", autosub.LOGFILE)
    cfg.set(section, "downloadeng", str(autosub.DOWNLOADENG))
    cfg.set(section, "downloaddutch", str(autosub.DOWNLOADDUTCH))
    cfg.set(section, "fallbacktoeng", str(autosub.FALLBACKTOENG))
    cfg.set(section, "englishsubdelete", str(autosub.ENGLISHSUBDELETE))
    cfg.set(section, "subeng", autosub.SUBENG)
    cfg.set(section, "subnl", autosub.SUBNL)
    cfg.set(section, "notifyen", str(autosub.NOTIFYEN))
    cfg.set(section, "notifynl", str(autosub.NOTIFYNL))
    cfg.set(section, "postprocesscmd", autosub.POSTPROCESSCMD)
    cfg.set(section, "subcodec", autosub.SUBCODEC)
    cfg.set(section, "launchbrowser", str(autosub.LAUNCHBROWSER))
    cfg.set(section, "skiphiddendirs", str(autosub.SKIPHIDDENDIRS))
    cfg.set(section, "wantedfirst", str(autosub.WANTEDFIRST))
    cfg.set(section, "podnapisi", str(autosub.PODNAPISI))
    cfg.set(section, "subscene", str(autosub.SUBSCENE))
    cfg.set(section, "opensubtitles", str(autosub.OPENSUBTITLES))
    cfg.set(section, "addic7ed", str(autosub.ADDIC7ED))
    cfg.set(section, "opensubtitlesuser", autosub.OPENSUBTITLESUSER)
    cfg.set(section, "opensubtitlespasswd", autosub.OPENSUBTITLESPASSWD)
    cfg.set(section, "addic7eduser", autosub.ADDIC7EDUSER)
    cfg.set(section, "addic7edpasswd", autosub.ADDIC7EDPASSWD)
    cfg.set(section, "browserrefresh", str(autosub.BROWSERREFRESH))
    cfg.set(section, "minmatchscore", str(autosub.MINMATCHSCORE))
    cfg.set(section, "searchinterval", str(autosub.SEARCHINTERVAL))
    cfg.set(section, "configversion", str(autosub.CONFIGVERSION))
    cfg.set(section, "hearingimpaired", str(autosub.HI))
    cfg.set(section, "skipstringnl", autosub.SKIPSTRINGNL)
    cfg.set(section, "skipstringen", autosub.SKIPSTRINGEN)
    cfg.set(section, "skipfoldersnl", autosub.SKIPFOLDERSNL)
    cfg.set(section, "skipfoldersen", autosub.SKIPFOLDERSEN)

    section = 'webserver'
    cfg.add_section(section)
    cfg.set(section, "webserverip", str(autosub.WEBSERVERIP))
    cfg.set(section, 'webserverport', str(autosub.WEBSERVERPORT))
    cfg.set(section, "username", autosub.USERNAME)
    cfg.set(section, "password", autosub.PASSWORD)
    cfg.set(section, "webroot", autosub.WEBROOT)

    section = 'logfile'
    cfg.add_section(section)
    cfg.set(section, "loglevel", logging.getLevelName(autosub.LOGLEVEL))
    cfg.set(section, "loglevelconsole",
            logging.getLevelName(autosub.LOGLEVELCONSOLE))
    cfg.set(section, "logsize", str(autosub.LOGSIZE))
    cfg.set(section, "lognum", str(autosub.LOGNUM))

    section = 'notify'
    cfg.add_section(section)
    if autosub.NOTIFYMAIL:
        cfg.set(section, "notifymail", str(autosub.NOTIFYMAIL))
        cfg.set(section, "mailsrv", autosub.MAILSRV)
        cfg.set(section, 'mailfromaddr', autosub.MAILFROMADDR)
        cfg.set(section, "mailtoaddr", autosub.MAILTOADDR)
        cfg.set(section, "mailusername", autosub.MAILUSERNAME)
        cfg.set(section, "mailpassword", autosub.MAILPASSWORD)
        cfg.set(section, "mailsubject", autosub.MAILSUBJECT)
        cfg.set(section, "mailencryption", autosub.MAILENCRYPTION)
        cfg.set(section, "mailauth", autosub.MAILAUTH)
    if autosub.NOTIFYGROWL:
        cfg.set(section, "notifygrowl", str(autosub.NOTIFYGROWL))
        cfg.set(section, "growlhost", autosub.GROWLHOST)
        cfg.set(section, "growlport", autosub.GROWLPORT)
        cfg.set(section, "growlpass", autosub.GROWLPASS)
    if autosub.NOTIFYNMA:
        cfg.set(section, "notifynma", str(autosub.NOTIFYNMA))
        cfg.set(section, "nmaapi", autosub.NMAAPI)
        cfg.set(section, "nmapriority", str(autosub.NMAPRIORITY))
    if autosub.NOTIFYTWITTER:
        cfg.set(section, "notifytwitter", str(autosub.NOTIFYTWITTER))
        cfg.set(section, "twitterkey", autosub.TWITTERKEY)
        cfg.set(section, "twittersecret", autosub.TWITTERSECRET)
    if autosub.NOTIFYPROWL:
        cfg.set(section, "notifyprowl", str(autosub.NOTIFYPROWL))
        cfg.set(section, "prowlapi", autosub.PROWLAPI)
        cfg.set(section, "prowlpriority", str(autosub.PROWLPRIORITY))
    if autosub.NOTIFYPUSHALOT:
        cfg.set(section, "notifypushalot", str(autosub.NOTIFYPUSHALOT))
        cfg.set(section, "pushalotapi", autosub.PUSHALOTAPI)
    if autosub.NOTIFYPUSHBULLET:
        cfg.set(section, "notifypushbullet", str(autosub.NOTIFYPUSHBULLET))
        cfg.set(section, "pushbulletapi", autosub.PUSHBULLETAPI)
    if autosub.NOTIFYPUSHOVER:
        cfg.set(section, "notifypushover", str(autosub.NOTIFYPUSHOVER))
        cfg.set(section, "pushoverappkey", autosub.PUSHOVERAPPKEY)
        cfg.set(section, "pushoveruserkey", autosub.PUSHOVERUSERKEY)
    if autosub.NOTIFYBOXCAR2:
        cfg.set(section, "notifyboxcar2", str(autosub.NOTIFYBOXCAR2))
        cfg.set(section, "boxcar2token", autosub.BOXCAR2TOKEN)
    if autosub.NOTIFYPLEX:
        cfg.set(section, "notifyplex", str(autosub.NOTIFYPLEX))
        cfg.set(section, "plexserverhost", autosub.PLEXSERVERHOST)
        cfg.set(section, "plexserverport", autosub.PLEXSERVERPORT)

    section = 'skipshow'
    cfg.add_section(section)
    for Show in autosub.SKIPSHOW:
        if re.match("^[0-9 ,.-]+$", autosub.SKIPSHOW[Show]):
            cfg.set(section, Show.replace(':', '~'), autosub.SKIPSHOW[Show])

    section = 'namemapping'
    cfg.add_section(section)
    for Name in autosub.USERNAMEMAPPING:
        cfg.set(section, Name.replace(':', '~'), autosub.USERNAMEMAPPING[Name])

    section = 'addic7edmapping'
    cfg.add_section(section)
    for Name in autosub.USERADDIC7EDMAPPING:
        cfg.set(section, Name, autosub.USERADDIC7EDMAPPING[Name])

    try:
        with open(autosub.CONFIGFILE, 'wb') as cfile:
            cfg.write(cfile)
    except Exception as error:
        return error
    return 'Config has been saved.'
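
Note the cfg.optionxform = str line at the top: SafeConfigParser lower-cases option names by default, and overriding optionxform keeps keys such as the show names case-sensitive. A standalone demonstration:

from ConfigParser import SafeConfigParser

cfg = SafeConfigParser()
cfg.optionxform = str                  # keep option names case-sensitive
cfg.add_section('demo')
cfg.set('demo', 'SeriesPath', '/tmp/series')
print cfg.items('demo')                # [('SeriesPath', '/tmp/series')]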
Example #20
def main_thread():

    parser = create_parser()
    opt = parser.parse_args()
    conf = SafeConfigParser()

    if opt.conf:
        conf.readfp(opt.conf)
    elif opt.reset:
        conf.readfp(opt.reset)
    elif opt.list:
        conf.readfp(opt.list)
    else:
        parser.print_help()
        return

    workspace_ = conf.get('common', 'workspace')
    try:
        os.makedirs(workspace_)
    except OSError:
        pass

    if opt.reset:
        Filter(workspace_).reset()
        return

    if opt.list:
        keys = Filter(workspace_).list_doing_task()
        for k in keys:
            print k
        return

    output_service_conf = dict(conf.items('source'))
    input_service_conf = dict(conf.items('destination'))
    if opt.filelist:
        output_service_conf['filelist'] = opt.filelist

    if conf.has_option('common', 'threads'):
        _threads = conf.getint('common', 'threads')
    else:
        _threads = 10

    if conf.has_option('common', 'record_succ'):
        _record_succ = conf.getboolean('common', 'record_succ')
    else:
        _record_succ = True

    print _record_succ

    log_config['handlers']['error_file']['filename'] = path.join(
        workspace_, 'failed_files.txt')
    dictConfig(log_config)

    loads_services()
    output_service = services_[output_service_conf['type']](
        **output_service_conf)
    input_service = services_[input_service_conf['type']](**input_service_conf)

    migrator = ThreadMigrator(input_service=input_service,
                              output_service=output_service,
                              work_dir=conf.get('common', 'workspace'),
                              threads=_threads,
                              record_succ=_record_succ)
    migrator.start()

    import time
    try:
        while True:
            state = migrator.status()

            if state['finish']:
                break
            time.sleep(3)

    except KeyboardInterrupt:
        state = migrator.status()
        print state
        #import sys
        #sys.exit()

    migrator.stop()
    state = migrator.status()
    print 'summary:\n ', 'failed: ', state['fail'], ' success: ', state[
        'success']
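
The threads/record_succ handling above is a common "option with default" pattern; the same idea as a generic sketch (a hypothetical helper, not part of the original):

# read an option if present, else fall back to a default
def get_with_default(conf, section, option, default, getter='get'):
    if conf.has_option(section, option):
        return getattr(conf, getter)(section, option)
    return default

# e.g. threads = get_with_default(conf, 'common', 'threads', 10, getter='getint')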
Example #21
from airflow.operators.python_operator import PythonOperator
from airflow.exceptions import AirflowException
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
import airflow.models as af_models
import airflow.operators as af_op

import json
import os, sys
from dateutil import parser
from urllib.request import urlopen

from ConfigParser import SafeConfigParser
_cfg = SafeConfigParser()
_cfg.read('airflow.cfg')

integration_host = _cfg.get('integration_server', 'host')
integration_port = _cfg.get('integration_server', 'port')

def Task_Store(ingestion_id, exchange):
    def Task1(**self):
        url = 'http://%s:%s/ingestions/getnstore_%s_files/?ingid=%s' \
            % (integration_host, integration_port, exchange, ingestion_id)
        urlopen(url).read()
    return Task1

def Task_Validate(ingestion_id, exchange):
    def Task1(**self):
        url = 'http://%s:%s/validations/data_validation/?ingid=%s' \
Example #22
def get_conf(src_fs):
    """Get the conf from a firmware fs"""
    cfg = SafeConfigParser()
    with src_fs.open('dataplicity.conf') as f:
        cfg.readfp(f)
    return cfg
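
A hypothetical call, assuming a pyfilesystem object whose root directory contains dataplicity.conf (the path is invented):

from fs.osfs import OSFS

cfg = get_conf(OSFS('/opt/dataplicity'))
print cfg.sections()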
Example #23
class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
    """Component XML-RPC request handler.
    
    Adds support for HTTP authentication.
    
    Exceptions:
    CouldNotAuthenticate -- client did not present acceptable authentication information
    
    Methods:
    authenticate -- prompt a check of a client's provided username and password
    handle_one_request -- handle a single rpc (optionally authenticating)
    """
    logger = logging.getLogger("Cobalt.Server.XMLRPCRequestHandler")

    class CouldNotAuthenticate(Exception):
        """Client did not present acceptible authentication information."""

    require_auth = True
    credentials = {'root': 'default'}
    try:
        config = SafeConfigParser()
        config.read(Cobalt.CONFIG_FILES)
        credentials['root'] = config.get('communication', 'password')
    except:
        pass

    def authenticate(self):
        """Authenticate the credentials of the latest client."""
        try:
            header = self.headers['Authorization']
        except KeyError:
            self.logger.error("No authentication data presented")
            raise self.CouldNotAuthenticate(
                "client did not present credentials")
        auth_type, auth_content = header.split()
        auth_content = base64.standard_b64decode(auth_content)
        try:
            username, password = auth_content.split(":")
        except ValueError:
            username = auth_content
            password = ""
        try:
            valid_password = self.credentials[username]
        except KeyError:
            raise self.CouldNotAuthenticate("unknown user: %s" % username)
        if password != valid_password:
            raise self.CouldNotAuthenticate("invalid password for %s" %
                                            username)

    def parse_request(self):
        """Extends parse_request.
        
        Optionally check HTTP authentication when parsing."""
        if not SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.parse_request(
                self):
            return False
        if self.require_auth:
            try:
                self.authenticate()
            except self.CouldNotAuthenticate, e:
                self.logger.error("Authentication failed: %s" % e.args[0])
                code = 401
                message, explanation = self.responses[401]
                self.send_error(code, message)
                return False
        return True
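
For context, the Authorization header this handler checks is plain HTTP Basic auth; a sketch of how a test client could build it (the credentials mirror the class defaults above, not real secrets):

import base64

token = base64.standard_b64encode("root:default")
header = "Basic " + token   # sent as:  Authorization: Basic <token>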
Example #24
import time
import json
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from pyvirtualdisplay import Display
from ConfigParser import SafeConfigParser
import redis
import sys 
import os

PARSER = SafeConfigParser()
PARSER.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini'))

REDIS_HOST = PARSER.get('redis', 'host')
REDIS_PORT = PARSER.get('redis', 'port')

tempdb = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)

def start_driver():
    """Open headless chromedriver"""
    print 'Starting Web driver...'
    display = Display(visible=0, size=(800, 600))
    display.start()
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome('/root/tracker_headless/chromedriver', chrome_options=chrome_options)
    #driver = webdriver.Chrome("C:\Program Files (x86)\Google\Chrome\Application\chrome.exe")
    # print "Sleeping for 4 ..."
    time.sleep(4)
    return driver, display
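
The tracker script expects a config.ini next to it with a [redis] section; a hypothetical example of that file:

[redis]
host = 127.0.0.1
port = 6379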
Example #25
def main():
    # === Extract options ===
    parser = OptionParser(usage="usage: %prog [options] <# of seconds to audit>")
    parser.add_option("-c", "--config", dest='configFile', default=None, help='Path to configuration file')
    parser.add_option("-g", "--gracePeriod", dest='gracePeriod', default=0, help='Number of seconds from now backwards to ignore')
    parser.add_option("-i", "--historyFile", dest='historyFile', default=None, help='Stores any pending transactions and the last run time')
    parser.add_option('-l', "--logFile", dest='logFile', default=None, help='Saves a log of all Amazon transactions')
    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.print_usage()
        exit()

    startTime = datetime.fromtimestamp(int(time.time()) - int(args[0]), pytz.utc)
    endTime = datetime.fromtimestamp(int(time.time()) - int(options.gracePeriod), pytz.utc)
    print("AWS refund audit requested from %s to %s" % (startTime.isoformat(), endTime.isoformat()))

    # === Get the configuration options ===
    config = SafeConfigParser()
    fileList = ['./amazon-config.cfg']
    if options.configFile is not None:
        fileList.append(options.configFile)
    config.read(fileList)

    # === Open up ze STOMP ===
    sc = DistStomp(config.get('Stomp', 'server'), config.getint('Stomp', 'port'))
    sc.connect()

    # === Connection to Amazon ===
    aws = Amazon(
        awsEndpoint = config.get('AwsConfig', 'endpoint'),
        awsAccessKey = config.get('AwsConfig', 'accessKey'),
        awsSecret = config.get('AwsConfig', 'secretKey')
    )

    # === Connection to MySQL ===
    dbcon = MySQL.connect(
        config.get('MySQL', 'host'),
        config.get('MySQL', 'user'),
        config.get('MySQL', 'password'),
        config.get('MySQL', 'schema')
    )

    # === Open up the history and log files ===
    # If the history file exists, it will modify the start time of this script to be the end time of the
    # history file.
    hfile = None
    historyStart = startTime
    historyEnd = endTime
    if options.historyFile and os.path.exists(options.historyFile):
        hfile = open(options.historyFile, 'r')
        if hfile.readline().strip() == AWS_HISTORY_FILE_VERSTR:
            historyStart = dateutil.parser.parse(hfile.readline().strip())
            historyEnd = dateutil.parser.parse(hfile.readline().strip())
            startTime = historyEnd
            print("History file modified search period, now %s to %s" % (startTime.isoformat(), endTime.isoformat()))
    else:
        print('Not starting with a valid history file.')

    sfile = None
    if options.logFile:
        sfile = open(options.logFile, 'a')
        sfile.write("!!! Starting run for dates %s -> %s\n" % (startTime.isoformat(), endTime.isoformat()))

    # === Sanity checks ===
    if endTime < startTime:
        startTime = endTime

    # === Main Application ===
    # --- Process all previously pending transactions from the history file. If the transaction is still in some form
    #     of pending, add it back to the history list.
    historyCount = 0
    historyList = []
    historyStats = {
        'Success': 0,
        'Pending': 0,
        'Failed': 0,
        'Ignored': 0
    }
    if hfile:
        print("Processing history file")
        for txn in hfile:
            historyCount += 1
            txn = json.loads(txn)
            result = processTransaction(txn, dbcon, aws, sc, sfile, config)
            historyStats[result] += 1
            if result == 'Pending':
                historyList.append(txn)
        hfile.close()

    # --- Obtain AWS history ---
    print("Obtaining AWS transactions for the period %s -> %s" % (startTime.isoformat(), endTime.isoformat()))
    awsTransactions = aws.getAccountActivity(startTime, endDate=endTime, fpsOperation='Pay')
    print("Obtained %d transactions" % len(awsTransactions))

    # --- Main loop: checks each aws transaction against the Civi database; adding it if it doesn't exist ---
    txncount = 0
    for txn in awsTransactions:
        txncount += 1
        result = processTransaction(txn, dbcon, aws, sc, sfile, config)
        historyStats[result] += 1
        if result == 'Pending':
            historyList.append(txn)

    print("\n--- Finished processing of messages. ---\n")

    # --- Prepare the history file for write ---
    if options.historyFile:
        print("Rewriting history file with %d transactions" % len(historyList))
        hfile = open(options.historyFile, 'w')
        hfile.write("%s\n%s\n%s\n" % (AWS_HISTORY_FILE_VERSTR, historyStart.isoformat(), endTime.isoformat()))
        for txn in historyList:
            hfile.write("%s\n" % json.dumps(txn))
        print("Flushing history file in preparation for main loop")
        hfile.flush()

    # --- Final statistics ---
    print("%d new AWS messages" % txncount)
    print(" Additionally %d messages were processed from history" % historyCount)
    print("This resulted in the following:")
    for entry in historyStats.items():
        print(" %s Messages: %d" % entry)

    # === Final Application Cleanup ===
    print("\nCleaning up.")
    dbcon.close()
    sc.disconnect()

    if hfile:
        hfile.close()
    if sfile:
        sfile.close()

    time.sleep(1)   # Let the STOMP library catch up
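
The history file read and rewritten above has a simple line-oriented layout, sketched here (AWS_HISTORY_FILE_VERSTR is defined elsewhere in the script):

# line 1: version marker (the value of AWS_HISTORY_FILE_VERSTR)
# line 2: history start time, ISO-8601
# line 3: history end time, ISO-8601
# line 4+: one JSON-encoded pending transaction per line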
Example #26
    def handle(self, *args, **options):
        """
        Read from file
        """

        parser = SafeConfigParser()
        parser.read(args[0])

        print("Starting import...")
        print("Reading config from file {0}".format(args[0]))

        header_name = "importdata"
        location = parser.get(header_name, 'location')
        course_id = parser.get(header_name, 'course_id')
        problem_id = parser.get(header_name, 'problem_id')
        prompt_file = parser.get(header_name, 'prompt_file')
        essay_file = parser.get(header_name, 'essay_file')
        essay_limit = int(parser.get(header_name, 'essay_limit'))
        state = parser.get(header_name, "state")
        next_grader_type = parser.get(header_name, "next_grader")
        add_grader = parser.get(header_name, "add_grader_object") == "True"
        set_as_calibration = parser.get(header_name,
                                        "set_as_calibration") == "True"
        max_score = parser.get(header_name, "max_score")
        student_id = parser.get(header_name, 'student_id')
        increment_ids = parser.get(header_name, 'increment_ids')
        rubric_file = parser.get(header_name, 'rubric_file')
        import_rubric_scores = parser.get(header_name,
                                          'import_rubric_scores') == "True"
        rubric_scores_file = parser.get(header_name, 'rubric_scores_file')

        rubric = open(settings.REPO_PATH / rubric_file).read()
        prompt = open(settings.REPO_PATH / prompt_file).read()

        score, text = [], []
        combined_raw = open(settings.REPO_PATH / essay_file).read()
        raw_lines = combined_raw.splitlines()
        for row in xrange(1, len(raw_lines)):
            score1, text1 = raw_lines[row].strip().split("\t")
            text.append(text1)
            score.append(int(score1))

        if increment_ids:
            student_id = int(student_id)

        if import_rubric_scores:
            rubric_scores = []
            combined_raw = open(settings.REPO_PATH / rubric_scores_file).read()
            raw_lines = combined_raw.splitlines()
            for row in xrange(1, len(raw_lines)):
                rubric_score_row = []
                for score_item in raw_lines[row].strip().split("\t"):
                    rubric_score_row.append(int(score_item))
                rubric_scores.append(rubric_score_row)

        for i in range(0, min(essay_limit, len(text))):
            sub = Submission(
                prompt=prompt,
                student_id=student_id,
                problem_id=problem_id,
                state=state,
                student_response=text[i],
                student_submission_time=timezone.now(),
                xqueue_submission_id=uuid4().hex,
                xqueue_submission_key="",
                xqueue_queue_name="",
                location=location,
                course_id=course_id,
                next_grader_type=next_grader_type,
                posted_results_back_to_queue=True,
                previous_grader_type="BC",
                max_score=max_score,
                rubric=rubric,
                preferred_grader_type=next_grader_type,
            )

            sub.save()
            if add_grader:
                sub.previous_grader_type = "IN"
                sub.save()
                grade = Grader(
                    score=score[i],
                    feedback="",
                    status_code=GraderStatus.success,
                    grader_id="",
                    grader_type="IN",
                    confidence=1,
                    is_calibration=set_as_calibration,
                )

                grade.submission = sub
                grade.save()

                success, rubric_targets = controller.rubric_functions.generate_targets_from_rubric(
                    sub.rubric)
                scores = []
                for z in xrange(0, len(rubric_targets)):
                    scores.append(random.randint(0, rubric_targets[z]))
                if import_rubric_scores:
                    score_item = rubric_scores[i]
                    if len(score_item) == len(scores):
                        scores = score_item
                        log.debug("Score: {0} Rubric Score: {1}".format(
                            score[i], scores))

                controller.rubric_functions.generate_rubric_object(
                    grade, scores, sub.rubric)

            if increment_ids:
                student_id += 1

        print(
            "Successfully imported {0} essays using configuration in file {1}."
            .format(
                min(essay_limit, len(text)),
                args[0],
            ))
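
The management command above is driven entirely by the INI file passed as its first argument; a hypothetical [importdata] section with the options it reads (all values invented):

[importdata]
location = MITx/6.002x/problem_1
course_id = MITx/6.002x/2013
problem_id = problem_1
prompt_file = data/prompt.txt
essay_file = data/essays.tsv
essay_limit = 100
state = C
next_grader = ML
add_grader_object = True
set_as_calibration = False
max_score = 4
student_id = 1
increment_ids = 1
rubric_file = data/rubric.xml
import_rubric_scores = False
rubric_scores_file = data/rubric_scores.tsv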
Example #27
        result = True

        while log_line and fetch_position < position + self.replication_max_replay_logs:
            log_line = linecache.getline(
                'replication/replay_logs/server.replay', fetch_position)
            if log_line:
                data.append(log_line)
                fetch_position += 1
            else:
                result = False

        return data


if __name__ == '__main__':
    config = SafeConfigParser()
    config.read([
        os.path.join(os.path.dirname(__file__), 'conf/default.conf'),
        # any other files to overwrite defaults here
    ])

    s = Server(
        host=config.get('server', 'host'),
        port=config.getint('server', 'port'),
        max_clients=config.getint('server', 'max_clients'),
        read_buffer=config.getint('server', 'read_buffer'),
        max_memory_allocation=config.get('server', 'max_memory_allocation'),
        persistance=config.get('server', 'persistance'),
        persistance_interval=config.getint('server', 'persistance_interval'),
        garbage_collection_interval=config.getint(
            'server', 'garbage_collection_interval'),
Example #28
def get_agent_config_vars(normalization_ids_map):
    config_vars = {}
    try:
        if os.path.exists(
                os.path.join(parameters['homepath'], "kafka", "config.ini")):
            parser = SafeConfigParser()
            parser.read(
                os.path.join(parameters['homepath'], "kafka", "config.ini"))
            insightFinder_license_key = parser.get(
                'kafka', 'insightFinder_license_key')
            insightFinder_project_name = parser.get(
                'kafka', 'insightFinder_project_name')
            insightFinder_user_name = parser.get('kafka',
                                                 'insightFinder_user_name')
            sampling_interval = parser.get('kafka', 'sampling_interval')
            group_id = parser.get('kafka', 'group_id')
            all_metrics = parser.get('kafka', 'all_metrics').split(",")
            client_id = parser.get('kafka', 'client_id')
            normalization_ids = parser.get('kafka',
                                           'normalization_id').split(",")
            if len(insightFinder_license_key) == 0:
                logger.error(
                    "Agent not correctly configured(license key). Check config file."
                )
                sys.exit(1)
            if len(insightFinder_project_name) == 0:
                logger.error(
                    "Agent not correctly configured(project name). Check config file."
                )
                sys.exit(1)
            if len(insightFinder_user_name) == 0:
                logger.error(
                    "Agent not correctly configured(username). Check config file."
                )
                sys.exit(1)
            if len(sampling_interval) == 0:
                logger.error(
                    "Agent not correctly configured(sampling interval). Check config file."
                )
                sys.exit(1)
            if len(group_id) == 0:
                logger.error(
                    "Agent not correctly configured(group id). Check config file."
                )
                sys.exit(1)
            if len(normalization_ids[0]) != 0:
                for index in range(len(all_metrics)):
                    metric = all_metrics[index]
                    normalization_id = int(normalization_ids[index])
                    if normalization_id > 1000:
                        logger.error(
                            "Please config the normalization_id between 0 to 1000."
                        )
                        sys.exit(1)
                    normalization_ids_map[
                        metric] = GROUPING_START + normalization_id
            if len(normalization_ids[0]) == 0:
                count = 1
                for index in range(len(all_metrics)):
                    metric = all_metrics[index]
                    normalization_ids_map[metric] = GROUPING_START + count
                    count += 1
            config_vars['licenseKey'] = insightFinder_license_key
            config_vars['projectName'] = insightFinder_project_name
            config_vars['userName'] = insightFinder_user_name
            config_vars['samplingInterval'] = sampling_interval
            config_vars['groupId'] = group_id
            config_vars['clientId'] = client_id
    except IOError:
        logger.error("config.ini file is missing")
    return config_vars
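
get_agent_config_vars() expects kafka/config.ini under the agent's home path; a hypothetical example with the options it validates (all values invented):

[kafka]
insightFinder_license_key = <license-key>
insightFinder_project_name = demo_project
insightFinder_user_name = demo_user
sampling_interval = 60
group_id = group1
all_metrics = cpu,mem,disk
client_id = client1
normalization_id = 1,2,3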
Example #29
def forcing(config, action, prod, file):
    """Performs the action on the given data
       product and corresponding input file.

       Args:
           config (string): Config file name
           action (string): Supported actions are:
                            'regrid' - regrid and downscale
           prod (string): The first product [mandatory option]:
                          (MRMS, HRRR or RAP)
           file (string): The file name (full path not necessary;
                          this is derived from the Python config/
                          param file and the YYYYMMDD portion of
                          the file name).

       Returns:
           None           Performs the indicated action on the
                          files based on the type of product and
                          any other relevant information provided
                          by the Python config/param file,
                          wrf_hydro_forcing.parm

    """

    # Read the parameters from the config/param file.
    parser = SafeConfigParser()
    parser.read(config)

    # Set up logging, environments, etc.
    forcing_config_label = "Anal_Assim"
    whf.initial_setup(parser,forcing_config_label)

    # Convert the action to lower case 
    # and the product name to upper case
    # for consistent checking
    action_requested = action.lower()
    product_data_name = prod.upper()
   
    # For analysis and assimilation, only 0hr, 3hr forecast fields from HRRR/RAP
    # are necessary. 3hr forecast files are already regridded and downscaled 
    # from the short-range configuration, so only 0hr forecast files are regridded/downscaled
    # here. In addition, MRMS data will be regridded, when available. 
    if action_requested == 'regrid':
        (date,modelrun,fcsthr) = whf.extract_file_info(file)
        # Usually check for forecast range, but only 0, 3 hr forecast/analysis data used
   
        # Check for HRRR, RAP, MRMS products. 
        WhfLog.info("Regridding and Downscaling for %s", product_data_name)

        if fcsthr == 0 and product_data_name == "HRRR":
            downscale_dir = parser.get('downscaling', 'HRRR_downscale_output_dir_0hr')
            try:
                regridded_file = whf.regrid_data(product_data_name,file,parser,False, \
                                 zero_process=True)
            except FilenameMatchError:
                WhfLog.error("Unexpected filename format encountered while regridding 0hr HRRR")
                raise
            except NCLError:
                WhfLog.error("NCL error encountered while regridding 0hr HRRR")
                raise
            try:
                whf.downscale_data(product_data_name,regridded_file, parser,False, False, \
                                 zero_process=True)

            except FilenameMatchError:
                WhfLog.error("Unexpected filename format encountered while downscaling 0hr HRRR")
                raise
            except NCLError:
                WhfLog.error("NCL error encountered while downscaling 0hr HRRR")
                raise
      

            # Move downscaled file to staging area where triggering will monitor
            match = re.match(r'.*/([0-9]{10})/([0-9]{12}.LDASIN_DOMAIN1.nc)',regridded_file)
            if match:
                full_dir = downscale_dir + "/" + match.group(1)
                full_finished_file = full_dir + "/" + match.group(2)
                # File should have been created in downscale_data step.
                try:
                    whf.file_exists(full_finished_file)
                except UnrecognizedCommandError:
                    WhfLog.error("File move failed for regridded/downscaled 0hr HRRR , filename format unexpected")
                    raise
                try: 
                    whf.move_to_finished_area(parser, prod, full_finished_file, zero_move=True)
                except:
                    WhfLog.error('Unsupported/unrecognized command encountered while moving file to finished area.')
                    raise
            else:
                WhfLog.error("File name format is unexpected")
                raise FilenameMatchError("File name format is unexpected")
        elif fcsthr == 0 and product_data_name == "RAP":
            downscale_dir = parser.get('downscaling', 'RAP_downscale_output_dir_0hr')
            try:
                regridded_file = whf.regrid_data(product_data_name,file,parser,False, \
                                 zero_process=True)
            except NCLError:
                WhfLog.error("NCL error while regridding 0hr RAP")
                raise
            except FilenameMatchError:
                WhfLog.error("Unexpected filename format encountered, cannot regrid 0hr RAP")
                raise

            try:
                whf.downscale_data(product_data_name,regridded_file, parser,False, False, \
                                   zero_process=True)
            except NCLError:
                WhfLog.error("NCL error encountered while downscaling 0hr RAP")
                raise

            # Move downscaled file to staging area where triggering will monitor
            match = re.match(r'.*/([0-9]{10})/([0-9]{12}.LDASIN_DOMAIN1.nc)',regridded_file)
            if match:
                full_dir = downscale_dir + "/" + match.group(1)
                full_finished_file = full_dir + "/" + match.group(2)
                # File should have been created in downscale_data step.
                try:
                    whf.file_exists(full_finished_file)
                except MissingFileError as mfe:
                    WhfLog.error("Missing file encountered while moving 0hr RAP file to staging area.")
                    raise 
                try:
                    whf.move_to_finished_area(parser, prod, full_finished_file, zero_move=True) 
                except UnrecognizedCommandError:
                    WhfLog.error("Unrecognized command error while trying to move 0hr RAP file to finished area")
                    raise
                except FilenameMatchError:
                    WhfLog.error("File name's format is unexpected.  Cannot move file to finished area")
                    raise
            else:
                WhfLog.error("File name's format is unexpected")
                raise FilenameMatchError('File name format is unexpected')
             
        elif product_data_name == "MRMS":
            try:
                regridded_file = whf.regrid_data(product_data_name,file,parser,False)
            except NCLError:
                WhfLog.error("NCL error encountered while regridding MRMS")
                raise
            except FilenameMatchError:
                WhfLog.error("File name's format is unexpected, cannot regrid MRMS")
                raise
            # Move regridded file to staging area where triggering will monitor
            # First make sure file exists
            try:
                whf.file_exists(regridded_file)
            except MissingFileError as mfe:
                WhfLog.error("Missing file encountered while moving regridded MRMS file")
                raise
           
            try:
                whf.move_to_finished_area(parser, prod, regridded_file, zero_move=False)
            except UnrecognizedCommandError:
                WhfLog.error("Unrecognized command error while trying to move MRMS file to finished area")
                raise
            except FilenameMatchError:
                WhfLog.error("File name's format is unexpecte.  Cannot move file to finished area")
                raise
        else:
            WhfLog.error("Either invalid forecast hour or invalid product chosen")
            WhfLog.error("Only 00hr forecast files, and RAP/HRRR/MRMS valid")
            # a bare raise here would have no active exception to re-raise
            raise ValueError("Invalid forecast hour or product chosen")
    else: # Invalid action selected
        WhfLog.error("ERROR [Anal_Assim_Forcing]- Invalid action selected")
        raise UnrecognizedCommandError("Invalid action selection within Analysis and Assimilation regridding and downscaling")
Example #30
# -*- coding: utf-8 -*-
from pprint import pprint
import sys
import os
from ConfigParser import SafeConfigParser
import urllib2
import json

conf = SafeConfigParser({})
try:
    if os.path.isfile("applications/%s/private/localconfig" % request.application):
        conf.read("applications/%s/private/localconfig" % request.application)
    else:
        conf.read("applications/%s/private/config" % request.application)
except:
    pass  #@TEMP probably should log this event...

# add our GitHub client secret from a separate file (kept out of source repo)
if os.path.isfile("applications/%s/private/GITHUB_CLIENT_SECRET" % request.application):
    GITHUB_CLIENT_SECRET = open("applications/%s/private/GITHUB_CLIENT_SECRET" % request.application).read().strip()
    conf.set("apis", "github_client_secret", GITHUB_CLIENT_SECRET)

#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################

## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()