Example #1
import os

import numpy as np

import closure
import model_engine
import taql_funcs


def skynet(vis, mode=3, closure_tels=['ST001', 'DE601', 'DE605'], cthr=1.3):
    ra, dec = taql_funcs.taql_from(vis, 'FIELD', 'PHASE_DIR')
    closure_scatter = closure.closure(vis, closure_tels, plotfile='')
    if closure_scatter > cthr:
        return closure_scatter
    if mode == 1:  # run self_calibration_pipeline_V2 with its default settings
        os.system('python self_calibration_pipeline_V2.py -m ' + vis +
                  ' -p')  # amplitudes?
    elif mode == 2:  # use NDPPP to selfcal the long baselines to a small (0.1") Gaussian
        model_engine.write_skymodel(ra, dec,
                                    np.array([0.0, 0.0, 1.0, 0.1, 0.0, 0.0]),
                                    vis + '_mod')
        skynet_NDPPP(vis, vis + '_mod', solint=5)  # timesteps
        os.system(
            'python self_calibration_pipeline_V2.py -d CORRECTED_DATA -m ' +
            vis + ' -p')
    elif mode == 3:  # make an engine model and selfcal against this
        model_engine.mainscript(vis,
                                closure_tels,
                                PLOTTYPE=0,
                                outname=vis + '_mod')
        skynet_NDPPP(vis, vis + '_mod', solint=5)
        os.system(
            'python self_calibration_pipeline_V2.py -d CORRECTED_DATA -m ' +
            vis + ' -p')
    else:
        return np.nan
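
A hedged usage sketch for the function above; the measurement-set name is an assumed placeholder, and only the return-value behaviour visible in the snippet is relied on:

# hypothetical call: run the engine-model self-calibration (mode=3);
# skynet returns the closure scatter if it exceeds cthr, np.nan for an
# unknown mode, and None once a calibration branch has run
scatter = skynet('target.ms', mode=3)
if scatter is not None:
    print('No self-calibration attempted; returned value:', scatter)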
Example #2
def countRegions():
    # Requires: -the first command line argument is the name of the
    #            image to be segmented
    #           -the second command line argument is the color space being
    #            used, either RGB, HSV, or HLS
    # Effects: -calls closure with count foreground argument on, returns
    #           count of distinct foreground objects
    
    colorSpace = argv[2].lower()
    
    if not (colorSpace in ["rgb", "hsv", "hls"]):
        print "Second argument not one of RGB, HSV, or HLS"
        print "The first argument should be the name of the image to be segmented"
        print "Followed by the desire color space representation"
        exit(1)
    
    try:
        image = Image.open(argv[1])
        imageData = colorSpaceConvert(list(image.getdata()), argv[2].lower())
    except:
        print "Invalid or no image name given"
        print "The first argument should be the name of the image to be segmented"
        print "Followed by the desire color space representation"
        exit(1)
        
    if colorSpace == "rgb":
        redMinMax = raw_input("Red min-max, between 0 and 255: ")
        greenMinMax = raw_input("Green min-max, between 0 and 255: ")
        blueMinMax = raw_input("Blue min-max, between 0 and 255: ")
        redMinMax = [float(x) / 255.0 for x in redMinMax.split()]
        greenMinMax = [float(x) / 255.0 for x in greenMinMax.split()]
        blueMinMax = [float(x) / 255.0 for x in blueMinMax.split()]
        colorRanges = [redMinMax, greenMinMax, blueMinMax]
    elif colorSpace == "hsv":
        hueMinMax = raw_input("Hue min-max, between 0 and 360: ")
        satMinMax = raw_input("Saturation min-max, between 0 and 100: ")
        valMinMax = raw_input("Value min-max, between 0 and 100: ")
        hueMinMax = [float(x) / 360.0 for x in hueMinMax.split()]
        satMinMax = [float(x) / 100.0 for x in satMinMax.split()]
        valMinMax = [float(x) / 100.0 for x in valMinMax.split()]
        colorRanges = [hueMinMax, satMinMax, valMinMax]
    else:
        hueMinMax = raw_input("Hue min-max, between 0 and 360: ")
        lightMinMax = raw_input("Lightness min-max, between 0 and 100: ")
        satMinMax = raw_input("Saturation min-max, between 0 and 100: ")
        hueMinMax = [float(x) / 360.0 for x in hueMinMax.split()]
        lightMinMax = [float(x) / 100.0 for x in lightMinMax.split()]
        satMinMax = [float(x) / 100.0 for x in satMinMax.split()]
        colorRanges = [hueMinMax, lightMinMax, satMinMax]
    
    param = Parameters()
    param.setImageSize(image.size)
    param.setColorRanges(colorRanges)
    
    seg = segmentation.colorSegmenter()
    mask = seg.segmentImage(imageData, param, True)
    
    close = closure.closure()
    close.segmentRegions(mask, param, 0, True, False)
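
As a small worked illustration of the normalisation above (the reply "50 200" is just an assumed example): each channel reply is split on whitespace and scaled into the [0, 1] range expected by the segmenter.

# illustrative only: a red reply of "50 200" becomes [50/255, 200/255]
redMinMax = [float(x) / 255.0 for x in "50 200".split()]
# redMinMax == [0.19607843137254902, 0.7843137254901961]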
Example #3
def load_configuration():
    """Load the command line arguments."""

    builds = closure.closure(BUILD_CONFIG)
    if len(builds) == 0:
        print(colorama.Fore.RED + "No build targets specified." +
              colorama.Fore.RESET)
    return builds
Example #4
def main():
    # commented out try for debugging purposes
    #try:
        shortOpts = "d:f:s:i:m:hepc:r:"
        longOpts = ["segment=", "fitness=", "search=", "image=", 
                    "mask=", "help", "export", "plot", "close=", "predict="]
        
        try:
            options, remainder = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
        except:
            print "\nERROR: Invalid option argument\n"
            exit(1)
        
        export = False
        plot = False
        close = False
        predictImages = False
        
        # sets relevant variables based on command line input
        for opt, arg in options:
            if opt in ("-d", "--segment"):
                segmenterName = arg
            elif opt in ("-f", "--fitness"):
                fitnessName = arg
            elif opt in ("-s", "--search"):
                searchName = arg
            elif opt in ("-i", "--image"):
                imageName = arg
            elif opt in ("-m", "--mask"):
                idealMaskName = arg
            elif opt in ("-e", "--export"):
                export = True
            elif opt in ("-p", "--plot"):
                plot = True
            elif opt in ("-c", "--close"):
                close = True
                closeType = arg
            elif opt in ("-r", "--predict"):
                predictImages = True
                predictFolderName = arg
            elif opt in ("-h", "--help"):
                print __doc__
                exit(0)
            else:
                pass
        
        # quit if extraneous input provided       
        if remainder != []:
            print "\nERROR: Extraneous input\n"
            exit(1)
            
        # initialize segmenter algorithm
        if segmenterName.lower() in ("rgb", "hsv", "hls"):
            segmenter = segmentation.colorSegmenter()
        else:
            print "\nERROR: Invalid or no segmenter name\n"
            exit(1)
        
        # initialize fitness function
        if fitnessName.lower() == "diff":
            fitnessFunc = fitness.absDiffFitness()
        else:
            print "\nERROR: Invalid or no fitness name\n"
            exit(1)
        
        # initialize search space algorithm
        if searchName.lower() == "random":
            searchFunc = RandomSearch.randomSearch(segmenter, fitnessFunc)
        elif searchName.lower() == "genetic":
            searchFunc = GeneticSearch.geneticSearch(segmenter, fitnessFunc)
        elif searchName.lower() == "anneal":
            searchFunc = AnnealSearch.annealSearch(segmenter, fitnessFunc)
        else:
            print "\nERROR: Invalid or no search name\n"
            exit(1)
        
        # try to open the image and convert its data from a [0, 255] RGB space
        # to a [0, 1] normalized RGB, HSV, or HLS space, depending on the segmenter
        # selected (HSV or HLS if the matching segmenter was chosen, otherwise RGB);
        # if opening the image fails, quit with an error
        try:
            image = Image.open(imageName)
            
            # initialize parameters object, init's image size and color space used
            parameter = parameters.Parameters()
            parameter.setImageSize(image.size)
        
            # use hsv or hls if segmenter specified, else use rgb by default
            if segmenterName.lower() in ("hsv", "hls"):
                parameter.setColorSpace(segmenterName.lower())
            else:
                parameter.setColorSpace("rgb")
            
            imageData = colorSpaceConvert(list(image.getdata()), parameter.colorSpace)
                
        except:
            print "\nERROR: Invalid or no image name\n"
            exit(1)
            
        # try to open ideal mask, if it fails, quit with error
        try:
            mask = Image.open(idealMaskName)
            idealMaskData = list(mask.getdata())
        except:
            print "\nERROR: Invalid or no mask name\n"
            exit(1)
        
        # run search on image parameter space for segmentation
        # returns optimal parameters found upon search completion
        # and saves a plot of fitness vs. search extent if plot set to true
        optimalParameters = searchFunc.searchImage(imageData, idealMaskData, parameter, plot)
        
        # reset upper limit
        optimalParameters.setUpperLimit(float("inf"))
        
        # if export enabled, saves mask using optimal parameters found to output.png
        if (export == True):
            segmenter.segmentImage(imageData, optimalParameters, True)
        
        # if close enabled, builds a 'closed' mask from the optimal parameters found
        # (saved to output.png when export is also enabled)
        if (close == True):
            whiteArg = 1 if closeType.lower() == "white" else 0
            mask = segmenter.segmentImage(imageData, optimalParameters)
            close = closure.closure()
            close.segmentRegions(mask, optimalParameters, saveImage = export, clearWhite = whiteArg)
        

        if predictImages == True:
            predict.predict(predictFolderName, optimalParameters, segmenter, fitnessFunc,
                           plot = True, exportImages = export) 
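
Both command-line examples rely on a colorSpaceConvert helper that is not shown in this listing. A minimal sketch of such a conversion, assuming the input is a list of 8-bit (R, G, B) tuples from Image.getdata() and building on the standard colorsys module (an illustrative stand-in, not the project's actual implementation):

import colorsys

def color_space_convert(pixels, color_space):
    # Convert 8-bit (R, G, B) tuples into normalised [0, 1] coordinates in the
    # requested colour space ('rgb', 'hsv' or 'hls'). Hypothetical helper; the
    # real colorSpaceConvert used above may differ.
    converted = []
    for r, g, b in pixels:
        r, g, b = r / 255.0, g / 255.0, b / 255.0
        if color_space == "hsv":
            converted.append(colorsys.rgb_to_hsv(r, g, b))
        elif color_space == "hls":
            converted.append(colorsys.rgb_to_hls(r, g, b))
        else:
            converted.append((r, g, b))
    return converted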
Example #5
def pf_prep(country, ameco, data, prg_params, country_params, changey, yf,
            projpath, olslog_path):
    import pandas as pd
    import numpy as np
    import ols_ar
    import closure
    import statsmodels.api as sm
    europop = pd.read_excel(projpath + prg_params.loc['popFile', 'value'],
                            sheet_name="to rats",
                            header=0,
                            index_col=0)
    europop = europop.T
    # NOTE: the europop file has years as strings (Excel formula) -> convert the index to integers on the fly
    europop.index = europop.index.astype('int64')
    ones = pd.Series(1., index=range(1960, changey + 11))
    ratio_cb = pd.Series(np.nan, index=range(1960, changey + 11))
    alpha = float(prg_params.loc['alpha', 'value'])
    clos_nb_y = int(prg_params.loc['clos_nb_y', 'value'])
    starthp = int(country_params.loc['starthp', country])
    # CALCULATE ACTUAL VARIABLES

    # LABOUR (totalh)
    # -----------
    # unemployment
    # create alias of LUR and NAWRU to ease the use of indices
    lurharm = data['LUR']
    dlur = lurharm.diff()
    nawru = data['NAWRU']

    # wages
    w = ameco[country + '_hwcdw']
    gw = w.pct_change() * 100
    data['nwinf'] = gw.diff()

    if country == 'lu':
        l_lux = ameco[
            country +
            '_sle1'] * ones  # employment series. NB: National accounts national concept for all countries
        l_cb = data['l'] - l_lux
        ratio_cb = l_cb / data['l']  # ratio of cross-border workers
        lf = l_lux / (1 - lurharm / 100)
    else:
        lf = data['l'] / (1 - lurharm / 100)

    lu = lurharm * lf
    # for full prog conditions to be added for SLE=1
    sle = ameco[country + '_sle']

    # population of working age
    popw = ameco[country + '_popa1']
    popt = ameco[country + '_popt']

    # participation rate
    part = 100 * lf / popw

    # Extended population at working age (popw) using projections from Eurostat
    # -----------
    popwf = europop[country.upper() + '_POPAF']
    poptf = europop[country.upper() + '_POPTF']

    mpopf = (popwf.shift(-1) + popwf) / (popwf + popwf.shift(1))
    mpoptf = (poptf.shift(-1) + poptf) / (poptf + poptf.shift(1))

    for i in range(1, 7):
        popw[changey + i] = popw[changey + i - 1] * mpopf[changey + i]
        popt[changey + i] = popt[changey + i - 1] * mpoptf[changey + i]

    wpopw = popw.pct_change()
    wpopt = popt.pct_change()

    # Trend Participation Rate (parts)
    # -----------

    # extended series on part

    if country == 'de':
        #	 participation rates of migrants and non-migrants
        MigrationDE = pd.read_excel(projpath +
                                    prg_params.loc['MigrationFile', 'value'],
                                    header=0,
                                    index_col=0)
        partM = MigrationDE['DE_PARTM'] * 100 * ones
        for i in range(changey + 4, changey + yf + 1):
            partM[i] = partM[i -
                             1] + 0.5 * (partM[i - 1] - partM[i - 2]) + 0.5 * (
                                 partM[i - 2] - partM[i - 3])

        partsM = partM.copy()  # trend migrant participation rate = actual rate

        #	 population at working age for migrants and non-migrants

        popwM = MigrationDE[
            'DE_POPWM'] / 1000 * ones  # migrant population at working age
        # assume that after 2023 no new migrants arrive (!) and that all of them stay in the 'migrant' group for several years
        popwM.loc[changey + 4:] = popwM[changey + 3]

        #	 labour force of migrants and non-migrants

        lfM = pd.Series(np.zeros(changey + yf - 1959),
                        index=range(1960,
                                    changey + yf + 1))  # migrant labour force
        lfM.loc[2015:] = popwM.loc[2015:] * partM.loc[2015:] / 100
        LFnonM = lf - lfM  # non-migrant labour force

        partnonM = part.copy()  # non-migrant participation rate
        popwnonM = popw - popwM  # non-migrant population at working age
        partnonM.loc[2015:changey] = LFnonM.loc[2015:changey] / popwnonM.loc[
            2015:changey] * 100  # ???

    nblag = int(country_params.loc['part_nblag', country])
    const = bool(int(country_params.loc['part_const', country]))
    ar_start = int(country_params.loc['part_ar_start', country])

    # We have to deal with shorter series for PART for CY 1997 and HR 2001
    if part.first_valid_index() > starthp:
        starthp = part.first_valid_index()

    time = bool(int(country_params.loc['part_timexog', country]))
    with open(olslog_path, 'a') as f:
        f.write('\n\n\n - - -PART OLS_AR\n')
    part = ols_ar.ols_ar(part, nblag, const, ar_start, changey, yf,
                         olslog_path, time)

    # filter the forecasted series
    if country == 'de':
        # derive the non-migrant participation rate from the total actual participation rate
        partnonM.loc[2015:] = part.loc[2015:] * popw.loc[2015:] / (
            popw.loc[2015:] -
            popwM.loc[2015:]) - partM.loc[2015:] * popwM.loc[2015:] / (
                popw.loc[2015:] - popwM.loc[2015:])
        cycle, partsnonM = sm.tsa.filters.hpfilter(
            partnonM.loc[starthp:changey + yf],
            int(country_params.loc['part_lambda', country]))
        partsnonM = partsnonM * ones
        parts = partsnonM.copy()
        parts.loc[2015:] = partsnonM.loc[2015:] * (
            popwnonM.loc[2015:] / popw.loc[2015:]) + partsM.loc[2015:] * (
                popwM.loc[2015:] / popw.loc[2015:])
    else:
        # ISSUE: for CY the HP filter starts in 1997 and does not provide values for 1995 ->

        cycle, parts = sm.tsa.filters.hpfilter(
            part.loc[starthp:changey + yf],
            int(country_params.loc['part_lambda', country]))
        parts = parts * ones

    # connect the hp and k filtered series to make it start in 1960
    # because SRKF starts in 1980

    for i in range(1, 21):
        data.loc[1980 - i,
                 'SRKF'] = data.loc[1980 - i + 1,
                                    'SRKF'] - data.loc[1980 - i + 1, 'wsrhp']

    data['wsrkf'] = data['SRKF'].diff()

    # Extended NAWRU (nawru)
    # -----------

    # NAWRU: **** NEW end rule Spring 2014:
    # ----------
    # *I* in the t+5 framework (yf<4) we use an extension rule; "end rule"
    # *I* in t+10 the nawru series is complete and long in the dataset

    # ===========
    # * Autumn Final 2018
    # * since the wage indicator gives the wrong signal for IE nawru, replace the nawru by a simple HP (lurharm)
    # *===========

    if country == 'ie':
        lurori = lurharm.copy()
        lurharm = lurharm * ones
        nblag = 2
        const = True
        ar_start = 1965
        with open(olslog_path, 'a') as f:
            f.write('\n\n\n - - -LURHARM OLS_AR\n')
        lurharm = ols_ar.ols_ar(lurharm, nblag, const, ar_start, changey, yf,
                                olslog_path)
        cycle, lurharms = sm.tsa.filters.hpfilter(
            lurharm.loc[starthp:changey + yf], 10)

        lurharms[changey +
                 1] = lurharms[changey] + .5 * (lurharms[changey] -
                                                lurharms[changey - 1])
        lurharms.loc[changey + 2:] = lurharms[changey + 1]

        nawru = lurharms
    else:
        nawru.loc[changey + 1:changey +
                  yf] = nawru[changey] + 0.5 * (nawru[changey] -
                                                nawru[changey - 1])

    dnawru = nawru.diff()

    # Trend Hours worked per Employee (hperehp)
    # -----------
    # extended series on hours worked
    # = ar(series, nblag, const, ar_start, changey, nb_fcst)
    nblag = int(country_params.loc['hpere_nblag', country])
    const = bool(int(country_params.loc['hpere_const', country]))
    ar_start = int(country_params.loc['hpere_ar_start', country])

    # print('\nExtending HPERE :\n-----------------')
    with open(olslog_path, 'a') as f:
        f.write('\n\n\n - - -HPERE OLS_AR\n')
    data['hpere'] = ols_ar.ols_ar(data['hpere'], nblag, const, ar_start,
                                  changey, yf, olslog_path)

    starthp = int(country_params.loc['starthp', country])
    # filter the forecasted series
    cycle, hperehp = sm.tsa.filters.hpfilter(
        data.loc[starthp:changey + yf, 'hpere'],
        int(country_params.loc['hpere_lambda', country]))
    hperehp = hperehp * ones

    dhpere = data['hpere'].diff()
    whperehp = hperehp.pct_change()
    lfss = parts / 100 * popw
    # CROSS-BORDER WORKERS
    # ------------
    # in the case of LU we need to add cross-border workers
    if country == 'lu':
        # find the best ARIMA process to explain the cross-border worker ratio in the past
        # use this best process to forecast the series
        # filter this long series to get the trend
        nblag = 2
        const = False
        ar_start = 1978
        with open(olslog_path, 'a') as f:
            f.write('\n\n\n - - -RATIO CB OLS_AR\n')
        ratio_cb = ols_ar.ols_ar(ratio_cb, nblag, const, ar_start, changey, yf,
                                 olslog_path)
        cycle, ratio_cb_hp = sm.tsa.filters.hpfilter(
            ratio_cb.loc[starthp:changey + yf], 10)
        # extend the related series
        l_lux = part / 100 * popw * (1 - lurharm / 100)
        l_cb = l_lux * ratio_cb / (1 - ratio_cb)
        data['l'] = l_lux + l_cb
        data['totalh'] = data['hpere'] * data['l']
        # Trend labour (totalhs)
        # -----------
        l_luxs = parts / 100 * popw * (1 - nawru / 100)
        l_cb_hp = l_luxs * ratio_cb_hp / (1 - ratio_cb_hp)
        #        lfss = parts / 100 * popw  # trend labour force, only includes luxembourgish!
        lp2 = l_luxs + l_cb_hp  # trend employment
    else:
        data['lf'] = data['l'] / (1 - lurharm / 100)
        lp2 = lfss * (1 - nawru / 100)

    wlp2 = lp2.pct_change()
    # trend total hours worked
    totalhs = lp2 * hperehp
    wtotalhs = totalhs.pct_change()

    # create a totalhs specific for the t+10

    # Investment rule (IYPOT)
    # -----------

    srkf_level = np.exp(data['SRKF'])
    ypot = totalhs**alpha * data['k']**(1 - alpha) * srkf_level
    # ypot = ypot * ones
    iypot = 100 * (data['iq'] / ypot)
    iypot = iypot * ones

    # NOTE : THERE IS A COUNTRY SPECIFICITY FOR DE FOR T+10

    nblag = int(country_params.loc['iypot_nblag', country])
    const = bool(int(country_params.loc['iypot_const', country]))
    ar_start = int(country_params.loc['iypot_ar_start', country])

    # print('\nExtending IYPOT :\n-----------------')
    with open(olslog_path, 'a') as f:
        f.write('\n\n\n - - -IYPOT OLS_AR\n')
    iypot = ols_ar.ols_ar(iypot, nblag, const, ar_start, changey, yf,
                          olslog_path)

    # GAP closure rule (ygap)
    # -----------
    ygap = 100 * (data['y'] / ypot - 1)

    # TODO
    ygap = closure.closure(ygap, clos_nb_y, changey)

    # other GAP closure rules (totalh_mt)
    # -----------

    # create the medium term actual series
    part_mt = part.copy()
    hpere_mt = data['hpere'].copy()
    l_mt = data['l'].copy()

    totalh_mt = data['totalh'].copy()
    lurharm_mt = lurharm * ones
    # create the gap series
    partgap = part - parts
    hperegap = data['hpere'] - hperehp
    lurgap = lurharm - nawru

    # close each gap (by the end of the period = 6)

    partgap = closure.closure(partgap, clos_nb_y, changey)
    hperegap = closure.closure(hperegap, clos_nb_y, changey)
    lurgap = closure.closure(lurgap, clos_nb_y, changey)

    # fill in the medium term actual series
    # TODO define a function to do so ?
    part_mt.loc[changey + 1:changey +
                yf] = parts.loc[changey + 1:changey +
                                yf] + partgap.loc[changey + 1:changey + yf]
    hpere_mt.loc[changey + 1:changey +
                 yf] = hperehp.loc[changey + 1:changey +
                                   yf] + hperegap.loc[changey + 1:changey + yf]
    lurharm_mt.loc[changey + 1:changey +
                   yf] = nawru.loc[changey + 1:changey +
                                   yf] + lurgap.loc[changey + 1:changey + yf]

    if country == 'lu':
        lcbgap = ones * 0.
        for i in range(1, clos_nb_y + 1):
            lcbgap[changey +
                   i] = (clos_nb_y - i) / clos_nb_y * (l_cb[changey] -
                                                       l_cb_hp[changey])
        lcbgap.loc[changey + clos_nb_y + 1:lcbgap.size + 1959] = 0.

        l_lux_mt = l_lux
        l_lux_mt.loc[changey + 1:changey + yf] = popw.loc[
            changey + 1:changey +
            yf] * part_mt.loc[changey + 1:changey + yf] / 100 * (
                1 - lurharm_mt.loc[changey + 1:changey + yf] / 100)
        l_cb_mt = l_cb
        l_cb_mt.loc[changey + 1:changey +
                    yf] = l_cb_hp.loc[changey + 1:changey +
                                      yf] - lcbgap.loc[changey + 1:changey +
                                                       yf]
        l_mt.loc[changey + 1:changey +
                 yf] = l_lux_mt.loc[changey + 1:changey +
                                    yf] + l_cb_mt.loc[changey + 1:changey + yf]
    else:
        l_mt.loc[changey + 1:changey +
                 yf] = popw.loc[changey + 1:changey + yf] * part_mt.loc[
                     changey + 1:changey + yf] / 100 * (
                         1 - lurharm_mt.loc[changey + 1:changey + yf] / 100)

    totalh_mt.loc[changey + 1:changey +
                  yf] = l_mt.loc[changey + 1:changey +
                                 yf] * hpere_mt.loc[changey + 1:changey + yf]

    data = pd.concat([
        data,
        ygap.rename('ygap'),
        ypot.rename('ypot'),
        part_mt.rename('part_mt'),
        parts.rename('parts'),
        lurharm_mt.rename('lurharm_mt'),
        hpere_mt.rename('hpere_mt'),
        lfss.rename('lfss'),
        lp2.rename('lp2'),
        totalhs.rename('totalhs'),
        l_mt.rename('l_mt'),
        totalh_mt.rename('totalh_mt'),
        popw.rename('popw'),
        popt.rename('popt'),
        iypot.rename('iypot')
    ],
                     axis=1)
    return data
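
The closure.closure(series, clos_nb_y, changey) calls in this last example close a gap series over the clos_nb_y forecast years that follow the last actual year changey. Judging from the lcbgap loop above, the rule looks like a linear phase-out of the last observed gap; a minimal sketch under that assumption (gap is a pandas Series indexed by year; this is not the package's actual code):

def linear_gap_closure(gap, clos_nb_y, changey):
    # Shrink the gap observed at changey towards zero in equal yearly steps over
    # clos_nb_y years, then hold it at zero. Hypothetical sketch of the closure
    # rule; closure.closure itself is not reproduced in this listing.
    closed = gap.copy()
    last = closed[changey]
    for i in range(1, clos_nb_y + 1):
        closed[changey + i] = last * (clos_nb_y - i) / clos_nb_y
    closed.loc[changey + clos_nb_y + 1:] = 0.0
    return closed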