Example #1
    def progress(self):
        """
            Returns the progress of the calculation, from 0 to 100. A value
            of 100 sets the internal status to 2 (completed or stopped); after
            that, any subsequent call to progress will always return 100. If the
            calculation has not been started yet, returns 0.
        """
        with self.__lock:
            status = self.status()
            if status < 2:

                conn = glb.connection()

                s_id = self.__session_id
                progrez = conn.fetchall(
                    "(SELECT COUNT(*) FROM processing.ground_motion WHERE session_id=%s)\
                UNION ALL (SELECT num_targets_failed from processing.sessions where gid=%s)\
                UNION ALL (SELECT num_targets from processing.sessions where gid=%s)",
                    (
                        s_id,
                        s_id,
                        s_id,
                    ))

                lp = len(progrez)
                #progrez should be something like [(0L,), (5210L,), (5210L,)], i.e. (done_ok, done_failed, total);
                #guard every access with lp == 3 so that a malformed result cannot raise an IndexError:
                total = progrez[2][0] if lp == 3 else 1
                done_ok = progrez[0][0] if lp == 3 else 0
                failed = progrez[1][0] if lp == 3 else 0

                done = done_ok + failed

                conn.close()
                if done >= total:
                    if failed > total:
                        self.stop(
                            "No target successfully written (internal server error)"
                        )
                    elif failed == total:
                        self.stop("No target successfully written")
                    else:
                        mzg = "{:d} of {:d} ground motion distributions successfully calculated".format(
                            done_ok, total)
                        if done_ok < total:
                            self.warning(mzg)
                        else:
                            self.msg(mzg)

                    self.__status = 2
                    return 100.0
                else:
                    return (100.0 * done) / total

            else:
                #status is already >= 2 here: completed or stopped
                return 100.0
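
A minimal usage sketch of the method above (hedged: the calc object, its class and its start method are assumptions for illustration, not part of the source): poll progress() until it returns 100, at which point the internal status has been set to 2 and every further call keeps returning 100.

import time

def wait_for_completion(calc, poll_seconds=1.0):
    # calc is assumed to expose the start() and progress() methods shown above
    calc.start()                      # hypothetical: kick off the calculation
    while True:
        pct = calc.progress()         # thread-safe: progress() acquires the internal lock
        print("progress: {:.1f}%".format(pct))
        if pct >= 100.0:              # 100 means status == 2 (completed or stopped)
            return
        time.sleep(poll_seconds)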
Example #2
def calculaterisk(gm, percentiles, session_id, scenario_id, target_id, geocell_id):
    
    db_conn = glb.connection() #FIXME: THIS CAN BE PASSED FROM THE MAIN METHOD?
    

    # get exposure information for the given location
    exp = exposure_module.exposure(db_conn, geocell_id)

    
    #get the damage probability matrices (DPMs)
    vul = vulnerability_module.vulnerability(gm, exp.bldg_dist, exp.bt_prop)
    #dg_pdfs = vul.dg_pdf #FIXME: UNUSED
    #get the per-building-type (bt) damage
    bt_dmg = vul.damage_bts()


    #calculate the loss and write it to the database
    loss = loss_module.loss(session_id, scenario_id, target_id, geocell_id,
                            exp.bldg_dist, exp.bt_prop, exp.target_prop, bt_dmg)
    loss.calculate()
    loss.write2db(db_conn, percentiles)
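
A hedged usage sketch of calculaterisk (every value below is invented for illustration; in the real flow, geocell_run in the next example calls it with the intensity it has just computed and written to processing.ground_motion):

percentiles = [0.05, 0.25, 0.5, 0.75, 0.95]   # hypothetical percentile set
gm = 7.2                                      # hypothetical scalar ground motion (intensity)
calculaterisk(gm, percentiles,
              session_id=42, scenario_id=7,   # hypothetical database ids
              target_id=1001, geocell_id=5210)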
Example #3
def geocell_run(intensity, lat_sta, lon_sta, percentiles, target_id, geocell_id, ground_motion_only, scenario_id, session_id, logdir=None):
    """
        Performs a ground motion calculation for the given arguments and writes the percentiles to the database
    """
    
    def val(value):
        #value should be either a scalar number or a numpy.ndarray. The check below is WEAK, because it only
        #tests whether len(value) can be called, but it avoids importing numpy etcetera
        return mcerp.UncertainFunction(value) if hasattr(value, '__len__') else float(value)
    
    try:
        
       
        #intensity = gmpe_func(val(M), val(lat_hyp), val(lon_hyp), val(depth_hyp), lat_sta, lon_sta)
        #intensity = gmpe_func(lat_sta, lon_sta)
        
        #INTENSITY IS EITHER A SCALAR OR AN mcerp.UncertainFunction SAMPLE ARRAY (A NUMPY ARRAY)
        intensity = val(intensity)
        
#        _p = intensity.percentile(percentiles) if isinstance(intensity, mcerp.UncertainFunction) else len(percentiles)*[intensity]
        _dist = [v for v in globals.discretepdf(intensity, _intensity_labels)]
        _dist.append(intensity.percentile(0.5) if isinstance(intensity, mcerp.UncertainFunction) else intensity)
        #write to the database
           
        
        arg1 =  """INSERT INTO processing.ground_motion (target_id, geocell_id, scenario_id, session_id, ground_motion) VALUES (%s, %s, %s, %s, %s);""" 
        arg2 = (target_id, geocell_id, scenario_id, session_id, _dist) 
        
#        arg1 =  """INSERT INTO processing.ground_motion (target_id, geocell_id, scenario_id, session_id, percentiles, ground_motion) VALUES (%s, %s, %s, %s, %s, %s);""" 
#        arg2 = (target_id, geocell_id, scenario_id, session_id, _p, _dist) 
        
        conn = globals.connection()
        conn.execute(arg1, arg2)
        conn.close()
        
        #do risk calculation (risk is Michael source, modified by me)
        if not ground_motion_only:
            risk_calc.calculaterisk(intensity, percentiles, session_id, scenario_id, target_id, geocell_id)

    except Exception as run_exc:
    
        if _DEBUG_:
            import traceback
            traceback.print_exc()
        
        #connect to the database and write the failed number:
        conn = globals.connection()
        conn.execute("UPDATE processing.sessions SET num_targets_failed = num_targets_failed + 1 where gid=%s",(session_id,))
        conn.close()
        
        #logdir must be passed as an argument: declaring it as a global variable causes problems (maybe due to multiprocessing?)
        if _DEBUG_:
            if logdir is not None:
                try:
                    folder = logdir
                    if os.path.exists(folder):
                        name = ''.join(random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(10)) if geocell_id is None else str(geocell_id)
                        file = os.path.join(folder, name+".log")
                        output = open(file,'w')
                        traceback.print_exc(file=output)
                        output.close()
                except:
                    traceback.print_exc()
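
The val helper above relies on duck typing: anything exposing __len__ is treated as an mcerp sample array, anything else as a scalar. A self-contained sketch of just that check (a plain list stands in for the sample array, so no third-party import is needed):

def is_sampled(value):
    # weak but dependency-free, mirroring val() above:
    # sequences/arrays expose __len__, plain numbers do not
    return hasattr(value, '__len__')

print(is_sampled(7.3))           # False -> treated as a scalar
print(is_sampled([0.1, 0.2]))    # True  -> treated as a sample array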
Example #4
def caravan_run(input_event):
    """
        Performs a ground motion calculation for the Caravan application
    """
    
    conn = None
    runinfo = None
    exception = None
    
    try:
        #Note: we might actually be passed a RunInfo object. Is this necessary? And how should we handle a
        #RunInfo which is already stopped? That should not be the case, however...
        runinfo = input_event.start() if isinstance(input_event, RunInfo) else RunInfo(input_event) #.start() is harmless if already started
        
        scenario = runinfo.scenario() 
        
        #NOTE: THE MCERP NPTS (params.DNP) CANNOT BE SET HERE BECAUSE IT NEEDS TO BE SET PRIOR TO ANY CALCULATION,
        #EVEN THE SCENARIO CREATION. MOVED TO CARAVAN_WSGI RUN_SIMULATION
        
        gmpe_func = scenario.gmpe 
        
        #Connect from within the process:
        conn = globals.connection(async=False)
        
        scenario_id, isnew = scenario.writetodb(conn)
        
        if not isnew:
            #arg1 = """DELETE FROM processing.ground_motion WHERE scenario_id=%d """ % scenario_id
            arg1 = """DELETE FROM processing.ground_motion WHERE scenario_id=%s"""
            
            #See http://www.postgresql.org/docs/9.1/static/sql-delete.html:
            #we could write "DELETE FROM processing.ground_motion WHERE scenario_id=%d RETURNING COUNT(*)" % scenario_id
            #and then: conn.execute(arg1)
            #but that raises a postgres error: "aggregate functions are not allowed in RETURNING".
            #Googling shows workarounds (BUT only starting from postgres 9.1):
            #http://stackoverflow.com/questions/6577823/how-can-i-insert-the-return-of-delete-into-insert-in-postgresql
            #Therefore, skip that solution
            
            #A NICER EXAMPLE found here: http://stackoverflow.com/questions/2251567/how-to-get-the-number-of-deleted-rows-in-postgresql
            #uses the psycopg2 cursor class and is preferable, AS LONG AS THE CONNECTION IS NOT IN ASYNC MODE
            #(we can switch it to sync here; it must be async only inside each apply_async call below). Therefore:
            cursor = conn.cursor(arg1, (scenario_id,))
            conn.commit() #NOTE: in async mode this does nothing
            rows_deleted = cursor.rowcount
            cursor.close()
            #this works because (psycopg2 cursor doc) a QUERY was executed. DELETE does not look like
            #a query, however this seems to work
            
            runinfo.msg("Using already stored Scenario (hash={0:d}), deleted {1:d} previously calculated cells" .format(scenario.dbhash(),rows_deleted))
        else:
            runinfo.msg("Written new scenario to databse (hash={0:d}):<br>Scenario = {1}".format(scenario.dbhash(),scenario.dbstr()))
                
        runinfo.msg("Scenario id: {:d}".format(scenario_id))    
        
        #define function to get value from scenario
        def scalar(key):
            val = scenario[key]
            
            if isinstance(val, mcerp.UncertainFunction):
                return val.mean #NOTE: not mean() !! it is a @property!!
            else:
                return float(val) #parse to float to be sure
        
        #CALCULATE RADIUS OF INTEREST
        key_a_ref = gk.AOI
        a_ref = scenario[key_a_ref] if key_a_ref in scenario else None
        
        ref_d = None
        
        #GET TESSELLATIONS:
        key_tess_ids = gk.TES
        tess_ids = scenario[key_tess_ids] if key_tess_ids in scenario else globals.tess_ids
        
        tess_id_str = " or ".join([("t.tess_id={:d}".format(t)) for t in tess_ids])
        runinfo.msg("Tessellation id(s): {}".format(', '.join(["{:d}".format(tid) for tid in tess_ids])))
        
        if a_ref is not None: 
            lon1, lat1, lon2, lat2 = a_ref
            targets = conn.fetchall("""select t.gid as target_id, t.geocell_id as geocell_id, st_X(t.the_geom) as lon, 
        st_Y(t.the_geom) as lat from exposure.targets as t where ("""+tess_id_str+""") 
        and st_within(t.the_geom, ST_MakeEnvelope(%s, %s, %s, %s, 4326));""", (lon1, lat1, lon2, lat2))

            runinfo.msg("Area: map rectangle ("+key_a_ref+" parameter [lon1, lat1, lon2, lat2], see above)") #str should place dot or not automatically

        else:
            key_i_ref = gk.AIR
            key_i_ref_km_step = gk.AKS
            
            I_ref = scenario[key_i_ref] if key_i_ref in scenario else globals.aoi_i_ref
            ref_dist_km_step = scenario[key_i_ref_km_step] if key_i_ref_km_step in scenario else globals.aoi_km_step #ref_dist_km_step

            ref_d = ref_dist(gmpe_func, I_ref, ref_dist_km_step)
            runinfo.msg("Area(I &ge; {:.2f}) radius: {:.1f} Km" .format(I_ref, ref_d)) #str should place dot or not automatically

            if ref_d < ref_dist_km_step:
                msg = "Epicentral intensity smaller than intensity reference = {:.2f} mw".format(I_ref)
                if _DEBUG_:
                    print(msg)
                raise Exception(msg)
            
            #GET TESSELLATION POINTS (GEOCELLS):
            key_lat = gk.LAT #"latitude"
            key_lon = gk.LON #"longitude"


            targets = conn.fetchall("""select t.gid as target_id, t.geocell_id as geocell_id, st_X(t.the_geom) as lon, 
            st_Y(t.the_geom) as lat from exposure.targets as t where ("""+tess_id_str+""") 
            and st_dwithin(geography(st_point(%s, %s)), geography(t.the_geom), %s);""" , (scalar(key_lon), scalar(key_lat), ref_d * 1000))
        #conn.commit()
        
        subprocesses = len(targets)
        num_malformed = 0
        
        if subprocesses:
            #targets is a table with columns:
            #target_id, geocell_id, lon, lat
            #and one row per geocell.
            #Note that geocell_id might be missing.
            #Do a check now? YES!
            def formwell(t):
                try: 
                    #t is a tuple, so we cannot assign to it.
                    #Simply do a check and preserve the old values.
                    #Note that the check below is fine also if the elements are numeric strings;
                    #instantiating a new list would be too much effort anyway,
                    #as the data should be numeric
                    _, _, _, _ = int(t[0]), int(t[1]), float(t[2]), float(t[3])
                except: 
#                    import traceback
#                    traceback.print_exc()
                    return None
                return t
                
            for i in range(subprocesses):
                t = targets[i]
                twf = formwell(t)
                if twf is None:
                    num_malformed += 1
                    targets[i] = None
        else:
            msg = "No target cells found (zero cells)"
            raise Exception(msg)        
        
        if num_malformed >= subprocesses:
            msg = "All target cells are malformed (missing or non-numeric values)"
            raise Exception(msg) 
        elif num_malformed > 0:
            runinfo.warning("{:d} of {:d} target cells found (skipping {:d} malfromed cells, containing missing or NaN values)".\
                        format(subprocesses - num_malformed, subprocesses, num_malformed))
        else:
            runinfo.msg("{:d} target cells found".format(subprocesses))
            
        
        #WRITE session_id in processing.sessions
        
        #see https://docs.python.org/2/library/datetime.html#datetime.datetime.now (we use utcnow instead of now)
        #and http://initd.org/psycopg/docs/usage.html#adaptation-of-python-values-to-sql-types
        session_id = None
        while True:
            session_timestamp = datetime.utcnow() #returns a datetime object, which is converted to timestamp in psycopg2
            session_rows = conn.fetchall("SELECT COUNT(*) FROM processing.sessions WHERE session_timestamp=%s;", (session_timestamp,))
            if session_rows[0][0] == 0:
                #NOTE: do NOT count num_malformed immediately as failed, otherwise the progress bar and time counter
                #have invalid data; just set subprocesses - num_malformed as the number of cells to be done:
                cursor = conn.cursor("INSERT INTO processing.sessions (scenario_id, session_timestamp, num_targets, num_targets_failed) \
                VALUES(%s, %s, %s, %s) RETURNING gid;", (scenario_id, session_timestamp, subprocesses-num_malformed, 0,))
                
                #Note: fetching the result works because of the RETURNING clause in the execute command. execute() can
                #return elements as long as (psycopg2 cursor doc) a QUERY was executed. Strictly speaking INSERT is not
                #a query, but it works here, just as DELETE does a few lines above
                conn.commit() #note: in async mode does nothing
                ret = cursor.fetchall()
                
                session_id = ret[0][0] #gid (serial number) of the newly added row
                cursor.close()
                break
        
        if session_id is None:
            raise Exception("Unable to get session_id. Internal server error")
        
        runinfo.msg("Session id: {:d}".format(session_id), "Starting main process (might take a while...)")
        
        #CREATE A Pool of processes. For info see:
        #http://stackoverflow.com/questions/25071910/multiprocessing-pool-calling-helper-functions-when-using-apply-asyncs-callback
        P = multiprocessing.Pool()
        runinfo.setprocess(P, session_id)
        #NOTES:
        #ARGUMENTS TO APPLY_ASYNC MUST BE PICKLABLE, AS WELL AS THE FUNCTION (FIRST ARGUMENT).
        #SEE https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled
        #THIS MEANS THAT DISTRIBUTIONS MUST BE CREATED EACH TIME INSIDE OUR TARGET FUNCTION.
        #NOTE ALSO THAT FOR DEBUGGING WE SHOULD USE APPLY OR MAP, THE COUNTERPARTS OF APPLY_ASYNC AND MAP_ASYNC,
        #BECAUSE THE LATTER DO NOT RAISE EXCEPTIONS.
        #See also https://github.com/tisimst/mcerp/blob/master/mcerp/__init__.py
        
        logdir = None
        DO_WRITE_DIR = False #secondary debug flag
        if _DEBUG_ and DO_WRITE_DIR:
            folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../_tmp/runerrors/{:d}".format(session_id))
            if os.path.exists(folder):
                print("EMPTYING DIR {0}".format(str(folder)))
                for the_file in os.listdir(folder):
                    file_path = os.path.join(folder, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                    except Exception: pass
            elif os.path.exists(os.path.dirname(folder)):
                try: os.mkdir(folder)
                except: pass
                
            if os.path.exists(folder): logdir = folder
            
                
        #'opposite' of scalar defined above: returns a numpy.ndarray if the value represents an mcerp distribution,
        #otherwise the float value. Note that numpy.ndarray seems to be PICKLABLE ('passable' as a multiprocessing.Pool argument)
        def val(value):
            #value = scenario[key]
            
            if isinstance(value, mcerp.UncertainFunction):
                return value._mcpts #this is a numpy array WHICH IS PICKLABLE, SO IT CAN BE PASSED TO multiprocessing.Pool
            else:
                return float(value) #parse to float to be sure
        
        #close the connection NOW?
        conn.commit()
        conn.close()
        conn = None
        
        #set percentiles. Note that any mcerp distribution raises an error if the requested percentiles fall outside
        #the number of points: a distribution with 10 points cannot calculate percentiles at, e.g., 0.05 and 0.95.
        #We therefore need the mcerp npts to be set accordingly
        percentiles = globals.percentiles

        key_gm_only = gk.GMO
        gm_only = scenario[key_gm_only] if key_gm_only in scenario else globals.gm_only
        
        #gmpe's might contain uncertain functions defined in mcerp WHICH ARE NOT PICKLABLE!
        #Given that making them picklable seems to be a mess in our case
        #(examples found on the internet deal with simple cases), we FIRST
        #calculate the intensity distribution, then pass ITS ARRAY (a numpy array, which IS picklable)
        #to our geocell_run function.
        #Drawback: we are not calculating the intensity in a separate process, which means
        #we do not take 100% advantage of the multiprocessing pool
        
        for t in targets:
            if t is None: continue
            intensity = gmpe_func(t[3], t[2])
            #geocell_run(gmpe_func, t[3], t[2], percentiles, t[0], t[1], gm_only, scenario_id, session_id, logdir)
            P.apply_async(geocell_run, [val(intensity), t[3], t[2], percentiles, t[0], t[1], gm_only, scenario_id, session_id, logdir])
            
        P.close()
            
    except Exception as e:
        exception = e
        if _DEBUG_:
            import traceback
            print(traceback.format_exc())
    finally:
        if conn is not None:
            conn.close()
        if exception is not None and runinfo is not None:
            runinfo.stop(exception)
            
        return runinfo
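
The comment block about DELETE ... RETURNING COUNT(*) above boils down to this: PostgreSQL rejects aggregate functions in RETURNING, but psycopg2 already exposes the number of affected rows as cursor.rowcount. A minimal standalone sketch using psycopg2 directly (the DSN and scenario id are placeholders, not values from the source):

import psycopg2

scenario_id = 7                                       # placeholder id
conn = psycopg2.connect("dbname=caravan")             # placeholder DSN
cur = conn.cursor()
cur.execute("DELETE FROM processing.ground_motion WHERE scenario_id = %s",
            (scenario_id,))
conn.commit()
print("deleted {:d} rows".format(cur.rowcount))       # rowcount: rows affected by the last execute
cur.close()
conn.close()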
Example #5
def query_simulation_data(request, response):
    event = request.json
    session_id = event['session_id']
    #print("session id"+str(session_id))
    conn = glb.connection(async=True)
    #query:
    #note: ::json casts to json, not to a json-formatted string
    #::double precision is NECESSARY as it returns a json-convertible value; the array alone returns python
    #Decimals, which would need to be converted to double prior to json dumps

    data = conn.fetchall("""SELECT 
ST_AsGeoJSON(ST_Transform(G.the_geom,4326))::json AS geometry, GM.geocell_id, GM.ground_motion, risk.social_conseq.fatalities_prob_dist, risk.econ_conseq.total_loss
FROM 
processing.ground_motion as GM
LEFT JOIN 
risk.social_conseq ON (risk.social_conseq.geocell_id = GM.geocell_id and risk.social_conseq.session_id = GM.session_id)
LEFT JOIN 
risk.econ_conseq ON (risk.econ_conseq.geocell_id = GM.geocell_id and risk.econ_conseq.session_id = GM.session_id)
LEFT JOIN 
exposure.geocells as G ON (G.gid = GM.geocell_id)
WHERE 
GM.session_id=%s""",(session_id,)) 

    #conn.conn.commit()
    conn.close()
    
    #HYPOTHESES:
    #1) The query above returns a table T whose header (columns) are:
    #    | geometry | geocell_id | ground_motion | fatalities_prob_dist | total_loss
    #
    #2) a single T row (R) corresponds to a geojson feature F:
    #{
    #    type: 'Feature', //geojson standard string (see doc)
    #    geometry : dict, //associated to geometry column
    #    id: number_or_string, //associated to geocell_id column
    #    properties:{
    #       gk.MSI: {data: usually_array, value:numeric_scalar}, //associated to ground_motion column
    #       gk.FAT: {data: usually_array, value:numeric_scalar}, //associated to fatalities_prob_dist column
    #       gk.ECL: {data: usually_array, value:numeric_scalar}  //associated to total_loss column
    #    }
    #}   
    # gk.MSI, gk.FAT and gk.ECL refer to globalkeys global variables. They are just strings
    # but defined globally for multi-purpose usage
    # 3) EACH PROPERTIES FIELD HAS TWO VALUES, DATA AND VALUE, WHICH WILL BE CALCULATED FROM ANY DATABASE ROW IN
    # THE FUNCTION process DEFINED BELOW. DATA IS MEANT TO BE AN ARRAY OF DATA TO BE VISUALIZED WHEN THE MOUSE IS
    # OVER THE RELATIVE GEOCELL ON THE MAP, WHEREAS VALUE IS THE VALUE TO BE VISUALIZED BY MEANS OF, E.G., A COLOR
    # FOR A PARTICULAR GEOCELL. Example: given a set of data bins representing the distribution at some values, e.g. [0.2, 0.5, 0.3],
    # then properties.data is that array, and value might be, e.g., the median, the max, or the index of the max
    # (the important thing is that, JavaScript side, one knows what data and value are in order to display them on the map)
    
    #NOW we define the columns to be set as properties (excluding geometry):
    #Each column here corresponds to a leaflet Layer in JavaScript
    #associating each of them to a table column index (see 1) in comment above):
    captions = {gk.MSI: 2, gk.FAT: 3, gk.ECL: 4}
    
    #THIS IS THE MAIN FUNCTION
    def process(name, row, col_index): #col_index: index of the column within the row (see captions above)
        try:
            data = row[col_index]
            if name == gk.MSI: #return the median
                #pop the last element, which is the median according to core.py
                m = data.pop() #removes and returns the last element
                return data, m
            elif name == gk.FAT: #return the INDEX OF the max value
                rem_val = 1
                max_val = 0 #avoid shadowing the builtin max
                ind_of_max = 0
                for i, n in enumerate(data):
                    if n > max_val:
                        max_val = n
                        ind_of_max = i
                    rem_val -= n
                    if rem_val < max_val: break #< not <=: priority to the higher value, if two or more are equal

                return data, ind_of_max
            elif name == gk.ECL: #economic losses, to be implemented
                pass
        except: pass #exception: return None, None below
        #elif ... here implement new values for newly added names
        return None, None
    
    dataret = {"type": "FeatureCollection", "features": None, "captions": {k:"" for k in captions}}
    features = [] #pre-allocation doesn't seem to matter. See e.g. http://stackoverflow.com/questions/311775/python-create-a-list-with-initial-capacity
    
    #set of empty layers. As soon as we have valid data
    #for a geocell g and a layer name N in the loop below, remove N from the
    #set defined below. This can be used JavaScript side to know immediately whether a layer is empty
    #or not, avoiding memory consumption
    empty_layers = {k for k in captions} #do a copy
    
    for row in data:
        cell = {'geometry': row[0], 'id': row[1], 'type': 'Feature', 'properties': {}}
        for name in captions:
            index = captions[name]
            pdata, value = process(name, row, index) #use a new name: 'data' still holds the full query result
            #remove the name from the empty layers set if the data is valid:
            if name in empty_layers and not (pdata is None and value is None): empty_layers.remove(name)
            cell['properties'][name] = {'data': pdata, 'value': value}

        features.append(cell)
    dataret['features'] = features
    dataret['emptyLayers'] = {k:True for k in empty_layers}

    return response.tojson(dataret)
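
The gk.FAT branch of process() above uses an argmax with an early exit: once the probability mass not yet seen (rem_val) drops below the current maximum, no later bin can overtake it, so the scan stops. The same idea as a standalone, self-contained sketch:

def argmax_prob(dist):
    # dist: probability bins summing to (roughly) 1, e.g. a fatalities distribution
    remaining = 1.0
    best = 0.0
    best_idx = 0
    for i, p in enumerate(dist):
        if p > best:
            best, best_idx = p, i
        remaining -= p
        if remaining < best:   # no later bin can exceed the current maximum
            break
    return best_idx

print(argmax_prob([0.2, 0.5, 0.3]))  # prints 1 (the 0.5 bin), without scanning the last bin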