def ReadLogConfig():
    try:

        appDirectory = GF.GetRootDirectory()
        file_Address_Private_Config = GF.GetAddressTo(main=appDirectory,
                                                      folderName="Config",
                                                      fileName="Private",
                                                      extension="cnf")
        file_Address_Public_Config = GF.GetAddressTo(main=appDirectory,
                                                     folderName="Config",
                                                     fileName="Public",
                                                     extension="cnf")

        if GF.IsFileExist(file_Address_Private_Config):
            ReadLoggerConfig(file_Address_Private_Config)

        elif GF.IsFileExist(file_Address_Public_Config):
            ReadLoggerConfig(file_Address_Public_Config)

        else:
            raise Exception("Couldn't find config file")

    except Exception as e:
        logging.exception(e)
        raise
Example #2
def on_ready():
    #initial retrieve
    GenFun.retrieveUrl()
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
def ConvertBravaSet(path):
    Dataset_SWC_To_VTP(path)

    # one file seems to be rotated with respect to the others
    with contextlib.redirect_stdout(None):
        file = path + "Set8_ColorCoded.CNG.vtp"
        patient = PatientModule.Patient()
        patient.LoadVTPFile(file)
        totalmatrix = GeneralFunctions.TMatrix(1, [0, 0, 180], [0, 0, 0])
        GeneralFunctions.TransformFile(file, totalmatrix)
def regionGreedyComputeLayout(data, classified_ISs, previous_sequence,
    previous_slot_base_layout, previous_slot_segments, use_heuristic_evaluator):
  # Initialize response variables.
  best_layout = None
  best_slot_base_layout = None
  best_slot_segments = None
  affects_computation_time = True

  # Initialize variables.
  fitness_cache = dict()
  current_timestep = data['time_step']
  extended_interaction_sessions, new_interaction_sessions = classified_ISs
  append_seq_length = len(new_interaction_sessions)

  # Construct the potential slot distributions.
  if append_seq_length == 0:
    seq_pool = [previous_sequence]
    (best_layout, best_fitness, best_seq, best_slot_base_layout,
        best_slot_segments) = Layout.evaluateSequences(seq_pool, data,
        fitness_cache, previous_slot_base_layout, previous_slot_segments)
    affects_computation_time = False
  else:
    # Now we need to decide the slot number of each new appended IS.
    if len(previous_sequence) == 0:
      # Simply assign slot numbers in ascending order.
      seq_pool = []
      seq = []
      for i in range(append_seq_length):
        seq.append(i)
      seq_pool.append(seq)
      # Evaluate
      (best_layout, best_fitness, best_seq, best_slot_base_layout,
          best_slot_segments) = Layout.evaluateSequences(seq_pool, data,
          fitness_cache, None, None)
    else:
      # Generate a sequence pool which contains all combinations of sequence.
      previous_occupied_slots, extended_occupied_slots = (
          GeneralFunctions.findOccupiedSlots(previous_slot_base_layout,
          current_timestep))
      seq_pool = GeneralFunctions.generateDynamicSequenceCombinations(
          previous_sequence, previous_occupied_slots, extended_occupied_slots,
          append_seq_length)
      print "seq_pool size: %d" % len(seq_pool)
      if not use_heuristic_evaluator:
          (best_layout, best_fitness, best_seq, best_slot_base_layout,
              best_slot_segments) = Layout.evaluateSequences(
              seq_pool, data, fitness_cache, None, None)
      else:
        modified_interaction_sessions = data['interaction_sessions'][-append_seq_length:]
        (best_layout, best_fitness, best_seq, best_slot_base_layout,
            best_slot_segments) = Layout.evaluateSequencesUsingHeuristics(
            seq_pool, data, fitness_cache, previous_slot_base_layout, previous_slot_segments, modified_interaction_sessions)

  print "best sequence: %s, best fitness: %d" % (best_seq, best_fitness)
  return best_layout, best_seq, best_slot_base_layout, best_slot_segments, affects_computation_time
    def __evaluate__(self, PRED):  #, predict_o_labels=0):
        import numpy as np
        from sklearn.metrics import f1_score

        true_threshold = 0.5
        y_predicted_np_array = PRED

        bool_predicted_np_array = np.zeros(y_predicted_np_array.shape,
                                           dtype=np.int32)
        for i in range(0, y_predicted_np_array.shape[0]):
            for j in range(0, y_predicted_np_array.shape[1]):
                if y_predicted_np_array[i, j] >= true_threshold:
                    #if predict_o_labels or j + 1 != self.devel_data_obj.o_label_id:
                    bool_predicted_np_array[i, j] = 1

        #devel_gold_np_array = self.devel_data_obj.get_y_n_hot_np_array()
        #if not predict_o_labels:
        #    bool_predicted_np_array = np.delete(bool_predicted_np_array, self.devel_data_obj.o_label_id - 1, 1)
        #    devel_gold_np_array = np.delete(devel_gold_np_array, self.devel_data_obj.o_label_id - 1, 1)

        #assert bool_predicted_np_array.shape == devel_gold_np_array.shape

        #f1_macro = f1_score(devel_gold_np_array, bool_predicted_np_array, average='macro')
        #f1_micro = f1_score(devel_gold_np_array, bool_predicted_np_array, average='micro')
        #f1_weighted = f1_score(devel_gold_np_array, bool_predicted_np_array, average='weighted')
        #f1_samples  = f1_score(devel_gold_np_array, bool_predicted_np_array, average='samples')

        devel_gold_np_array = self.devel_data_obj.get_y_n_hot_np_array()
        assert bool_predicted_np_array.shape == devel_gold_np_array.shape

        f1_macro = f1_score(devel_gold_np_array,
                            bool_predicted_np_array,
                            average='macro')
        f1_micro = f1_score(devel_gold_np_array,
                            bool_predicted_np_array,
                            average='micro')
        f1_weighted = f1_score(devel_gold_np_array,
                               bool_predicted_np_array,
                               average='weighted')
        f1_samples = f1_score(devel_gold_np_array,
                              bool_predicted_np_array,
                              average='samples')

        self.PredMetricLog.append(
            [self.EpochNoCntr, f1_macro, f1_micro, f1_weighted, f1_samples])

        MSG = self.CurrentArchName
        MSG += "\tEpoch: " + str(self.EpochNoCntr)
        MSG += "\tf1-macro: " + GF.f_round(f1_macro)
        MSG += "\tf1-micro: " + GF.f_round(f1_micro)
        MSG += "\tf1-weighted: " + GF.f_round(f1_weighted)
        MSG += "\tf1_samples: " + GF.f_round(f1_samples)
        print(MSG)
        """
Example #6
def travel_time_categorize_rank_table(travel_time_selected_TA,
                                      treatment_area,
                                      export_path="../Intermediate_Files/",
                                      from_scratch=True,
                                      rank_cap=None,
                                      verbose=False):
    """
    travel_time_selected_TA: reduced travel time set to find alternatives to create scale
    rank_cap=None: integer rank at which to stop increasing
    verbose: whether to print progress or stay silent

    returns data frame of value ranks across a row instead of raw values
    """
    file_name = f"{treatment_area}_ranked_tt_plz_table"
    file_ext = "csv"
    file_path = "../Intermediate_Files/"
    if not from_scratch:
        file = GF.check_for_saved(file_path + file_name + "." + file_ext)
        if isinstance(file, pd.DataFrame): return file

    # prepare output structure
    tt_categorized = travel_time_selected_TA.copy()

    # assign rank within the row, up to rank cap
    for i, p in enumerate(travel_time_selected_TA.index):
        if verbose:
            print(
                f"ranking plz {p}, {i} of {len(travel_time_selected_TA.index)}"
            )

        if not rank_cap:
            tt_categorized.loc[p] = [
                t[0] for t in sorted(enumerate(
                    list(travel_time_selected_TA.loc[p])),
                                     key=operator.itemgetter(1))
            ]
        else:
            tt_categorized.loc[p] = [
                min(t[0], rank_cap) for t in sorted(enumerate(
                    list(travel_time_selected_TA.loc[p])),
                                                    key=operator.itemgetter(1))
            ]

    GF.export_data_to_file(tt_categorized,
                           filename=file_name,
                           path=export_path,
                           ftype=file_ext,
                           overwrite=True)

    return tt_categorized
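
A minimal pandas sketch (not from the source) of the same idea: each row's travel times replaced by ranks, optionally capped. The column names and values are made up for illustration.

import pandas as pd

tt = pd.DataFrame({"h1": [10, 35], "h2": [25, 5], "h3": [40, 20]},
                  index=["plz_a", "plz_b"])
ranks = tt.rank(axis=1, method="first").astype(int) - 1  # 0-based rank per row
print(ranks.clip(upper=2))  # with a hypothetical rank_cap of 2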
Example #7
def MapMeshtoMSH(filevtp, filemsh, output="PialSurface.vtp"):
    """
    Apply the mapping on one surface to another surface.
    :param filevtp: The remeshed surface file, see the remesh function.
    :param filemsh: The file containing the VEASL mapping.
    :param output: Filename of the resulting file.
    :return: Nothing
    """
    print("Mapping msh to vtp.")
    regionsIDs = [4, 21, 22, 23, 24, 25, 26, 30]
    patient = Patient.Patient()
    patient.Perfusion.LoadPrimalGraph(filevtp)
    centroids = patient.Perfusion.PrimalGraph.GetTriangleCentroids()

    msh = GeneralFunctions.MSHfile()
    msh.Loadfile(filemsh)
    positions, elements, indexes = msh.GetSurfaceCentroids(regionsIDs)

    sys.setrecursionlimit(10000)
    KDTree = scipy.spatial.KDTree(positions)
    MinDistance, MinDistanceIndex = KDTree.query(centroids, k=1)

    regiondict = GeneralFunctions.MajorIDdict_inv
    regionsIDs = [
        regiondict[elements[trianglenumber][3]]
        for index, trianglenumber in enumerate(MinDistanceIndex)
    ]
    patient.Perfusion.PrimalGraph.PolygonColour = regionsIDs
    patient.Perfusion.PrimalGraph.File = output
    patient.Perfusion.PrimalGraph.GraphToVTP("")
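
The core of the mapping above is a nearest-centroid label transfer; a self-contained sketch of that step follows, with made-up coordinates and region IDs rather than real VEASL data.

import numpy as np
import scipy.spatial

source_pts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # msh surface centroids
source_ids = np.array([4, 21, 22])                                 # their region IDs
targets = np.array([[0.1, 0.0, 0.0], [0.0, 0.9, 0.0]])             # vtp triangle centroids
_, nearest = scipy.spatial.KDTree(source_pts).query(targets, k=1)
print(source_ids[nearest])  # region ID carried over to each target triangle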
Example #8
def response(message):

    # Get the message from the user and decide what to do with it
    json = GenFun.retrieveUrl()
    json = json['Emotes']

    #checking if there are any trigger words for emotes that need to be dealt with
    for x in json.keys():

        if re.search(r"\b" + re.escape(str(json[x]['trigger'])) + r"\b", str(message.content).lower()):
            textMessage = json[x]['response']
            client.send_message(message.channel, textMessage)

    #checking for a mention in a conversation
    if 'bot ' in str(message.content).lower() or ' bot' in str(message.content).lower():
        # casually interrupting the conversation
        textMessage = "You mentioned me " + str(message.author) + '!'
        client.send_message(message.channel, textMessage)

    #checking this phrase
    if 'press f to pay respect' in str(message.content).lower().strip():
        textMessage = 'f'
        # spam 'f' exactly enough times to fill up the screen in a normal browser window
        for x in range(0, 23):
            client.send_message(message.channel, textMessage)
    def lp(self, ARG_msg):  #log and print message
        try:
            LOCAL_CallerClassName = str(
                inspect.stack()[1][0].f_locals["self"].__class__)
        except:
            LOCAL_CallerClassName = ""

        LOCAL_CallerFunction = inspect.currentframe().f_back.f_code.co_name
        if isinstance(ARG_msg, basestring):
            ARG_msg = [ARG_msg]
        HEADER = "[" + GF.DATETIME_GetNowStr(
        ) + "] [" + LOCAL_CallerClassName + "." + LOCAL_CallerFunction + "]: "
        print HEADER
        self._LogFileHandler.write(HEADER + "\n")
        for itemstr in ARG_msg:
            try:
                itemstr = str(itemstr).replace('\r', '\n')
            except:
                itemstr = itemstr.encode('utf-8').replace('\r', '\n')

            for item in itemstr.split("\n"):
                if len(item) == 0:
                    item = "-"
                item = "      " + item
                print item
                self._LogFileHandler.write(item + "\n")
        print ""
        self._LogFileHandler.write("\n")
    def __init__(self, args):
        self.GLOBAL_BEST_DEVEL_PRED_RESULTS = []

        self.args = args
        self._LogFileHandler = open(args.logfileaddress, "wt")
        self.lp("Program started ...")
        self.__validate_args__()

        self.PARAMS = collections.OrderedDict()
        base = (args.data_folder + '/' + args.ann_set + '/' + args.ann_type +
                '/' + args.ann_type)
        self.PARAMS["train_filename"] = base + '-train-annotations.txt'
        self.PARAMS["devel_filename"] = base + '-devel-annotations.txt'
        self.PARAMS["test_filename"] = base + '-test-annotations.txt'

        self.PARAMS["X_lower_row_len"] = 1  # Lower text length threshold
        self.PARAMS["X_upper_row_len"] = 400  # Upper text length threshold
        self.PARAMS["X_used_row_len"] = -1
        # default size of the word embeddings when no pre-created embeddings model is given
        self.PARAMS["default_embeddings_dim"] = 300

        MSG = ["" * 80, "PARAMETERS:", "-" * 20]
        for key in self.PARAMS.keys():
            MSG.append(GF.NVLR(key, 20) + " : " + str(self.PARAMS[key]))
        MSG.append("*" * 80)
        self.lp(MSG)
Example #11
def get559(ut,Rest,name):
    while True:
        try:
            start_time = datetime.datetime.utcnow()
            # ------------------------------------------------------------------------------
            # Query UIC559
            # ------------------------------------------------------------------------------
            file559name = "%s_%s_UIC559_%s.xml" % (ut, name, GLOBAL.NOW_STR)
            file559full = "%s/%s" % (SETTINGS.NPATH559, file559name)

            LOG.info("-" * GLOBAL.NCHAR)
            LOG.info("GETTING UIC from %s:%s" % (Rest._ip, Rest._port))
            LOG.info("-" * GLOBAL.NCHAR)
            if Rest._uicid != False:
                # We already have the last event id, so ask only for newer events
                response = Rest.getUicfromid(Rest._uicid)
            else:
                # The first time we ask for the last 200 events
                response = Rest.getUic()

            if Rest._uicid == False or int(Rest._uicid) < int(response.headers['X-Cafpa-UpperEventId']):
                if response.status_code == 200:
                    # Write the file to disk
                    with open(file559full, "wb") as f:
                        f.write(response.text)
                        # Update the last event id from the header
                        Rest._uicid = response.headers['X-Cafpa-UpperEventId']
                    # Move the file to the output folder
                    if GeneralFunctions.getarchivesize(file559full) > 0:
                        GeneralFunctions.ZipandMovetoUpload(file559name, file559full, SETTINGS.NPATH559,
                                                            SETTINGS.ZPATH559)
            else:
                LOG.info("No new events")

        except requests.exceptions.RequestException as e:
            LOG.warning("ERROR REQUESTS: %s" % str(e))
            pass

        except Exception as e:
            LOG.error("ERROR getting UIC 559 -->%s" % e)
            pass
        # ------------------------------------------------------------------------------
        # Keep the polling period
        # ------------------------------------------------------------------------------
        GeneralFunctions.keepperiod(10, start_time)
Example #12
def nc2text_pres_basin(ncfile, thresh, fo):
    """
    
    This function takes the IBTRACS TC file and writes to text file all those 
    landfalling TCs occurring in the Atlantic, with winds >thresh. 
    
    Slow - but won't be called often!
    
    """

    data = Dataset(ncfile, "r")
    nstorms = len(data.dimensions["storm"])
    time = data.variables["time"]

    # Open file, iterate, and write out
    with open(fo, "w") as fo:

        for row in range(nstorms):

            # Temporarily hold all time steps of this TC and check for
            # concurrency in TC+ status and landfall
            scratch_pres = data.variables["wmo_pres"][row].data[:]
            scratch_landfall = data.variables["landfall"][row].data[:]
            ind = np.logical_and(
                np.logical_and(scratch_pres <= thresh, scratch_landfall == 0),
                scratch_pres > 0)

            ind2 = np.logical_and(ind, ~np.isnan(scratch_pres))

            if ind2.any():

                ntime_i = np.sum(~data.variables["wmo_pres"][row].mask[:])

                for col in range(ntime_i):

                    # Use this step to filter out spurious writing!
                    if data.variables["wmo_pres"][row, col].data > 0:

                        # Deal with time
                        yr, mon, day, hr, dt = GF.conTimes(
                            time_str=time.units, calendar="standard",
                            times=np.atleast_1d(time[row, col]), safe=False)

                        # Decimal time
                        decday = dt2decday(np.atleast_1d(dt))[0]

                        # Write out
                        str1="%.0f\t%.0f\t%.3f\t%.3f\t%.3f\t%.3f\t%.2f\t%.1f\t" \
                        % (row,yr[0],decday,data.variables["lon"][row,col],
                         data.variables["lat"][row,col],\
                         data.variables["wmo_wind"][row,col],\
                         data.variables["wmo_pres"][row,col],\
                         data.variables["landfall"][row,col])

                        str2=data.variables["basin"][row,col,0] + \
                         data.variables["basin"][row,col,1] + "\n"

                        fo.write(str1 + str2)

    return 0
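
A small numpy sketch (made-up values) of the masking step above: a time step is kept when the WMO pressure is at or below the threshold, the landfall flag is 0, and the pressure is positive and not NaN.

import numpy as np

pres = np.array([1005., 970., 940., np.nan, 0.])
landfall = np.array([1, 0, 0, 0, 0])
ind = np.logical_and(np.logical_and(pres <= 960., landfall == 0), pres > 0)
ind2 = np.logical_and(ind, ~np.isnan(pres))
print(ind2)  # only the 940 hPa landfalling step survives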
Example #13
def ta_to_pchoice(ta):
    """
    from a treatment area name, load the file for pchoice for the treatment area
    """
    if not isinstance(ta, str):
        ta = ta_to_name(ta)
    path = f"../Intermediate_Files/{ta}_hospital_features_by_patient.csv"
    return GF.check_for_saved(path)
    def __validate_args__(self):
        self.lp("Validating args ...")
        D = self.args.__dict__
        MSG = ["*" * 80, "Command-Line args:", "-" * 20]
        for key in sorted(D.keys()):
            MSG.append(GF.NVLR(key, 20) + " : " + str(D[key]))
        MSG.append("*" * 80)
        self.lp(MSG)
Example #15
def ta_to_pattr(ta):
    """
    from a treatment area name, load the file for pattr for the treatment area
    """
    if not isinstance(ta, str):
        ta = ta_to_name(ta)
    path = f"../Intermediate_Files/{ta}_imported_patient_attributes_data.csv"
    return GF.check_for_saved(path)
Example #16
def ta_to_hstruct(ta):
    """
    from a treatment area name, load the file for hstruct for the treatment area
    """
    if not isinstance(ta, str):
        ta = ta_to_name(ta)
    path = f"../Intermediate_Files/{ta}_imported_hospital_structural_data.csv"
    return GF.check_for_saved(path)
Example #17
def apply_BC(simNC,
             corrNC,
             var,
             cname,
             refname,
             outNC,
             retVal=False,
             prog=False):
    """
    
    Function takes a simulation file and a correction netCDF file 
    output from 'calc_BC' and uses these data to bias correct simNC
    This function wraps _apply_BC - which does the lifting
        
        -simNC   : netCDF file of simulation data to be corrected
        -corrNC  : netCDF file with the correction factors and their reference
        -cname   : string with name of the correction factors
        -refname : string with name of the reference values 
        
    """

    # Read in data
    sim_data = Dataset(simNC, "r")
    simgrid = sim_data[var]
    corr_data = Dataset(corrNC, "r")
    corrgrid = corr_data[cname][:, :, :]
    simref = corr_data[refname][:, :, :]
    mask = corrgrid.mask
    missVal = corr_data[cname]._FillValue
    lat_out = sim_data["lat"]
    lon_out = sim_data["lon"]
    time_out = sim_data["time"]

    # Call function to correct
    out = _apply_BC(simgrid, corrgrid, simref, mask, missVal, prog)

    # Write out
    varis = {"hi": [out, "degrees_Celsius"]}
    GF.write_nc(outNC, varis, lat_out, lon_out, time_out,
                time_string="", lat_string="",
                lon_string="", cal="", mv=missVal)

    if retVal: return out
    else: return 0
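
A hedged usage sketch; the file paths and field names below are placeholders, not values taken from the source.

corrected = apply_BC("sim_temperature.nc", "bc_factors.nc", var="hi",
                     cname="corr", refname="ref", outNC="sim_temperature_bc.nc",
                     retVal=True, prog=True)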
Example #18
def priorityGreedyComputeLayout(data, classified_ISs, previous_sequence,
                                previous_slot_base_layout,
                                previous_slot_segments,
                                use_heuristic_evaluator):
    # Initialize response variables.
    best_layout = None
    best_slot_base_layout = None
    best_slot_segments = None
    affects_computation_time = True

    # Initialize variables.
    fitness_cache = dict()
    current_timestep = data['time_step']
    extended_interaction_sessions, new_interaction_sessions = classified_ISs
    append_seq_length = len(new_interaction_sessions)

    # If there are no new ISs, the genome sequence is the same as the previous one.
    if append_seq_length == 0:
        seq_pool = [previous_sequence]
        (best_layout, best_fitness, best_seq, best_slot_base_layout,
         best_slot_segments) = Layout.evaluateSequences(
             seq_pool, data, fitness_cache, previous_slot_base_layout,
             previous_slot_segments)
        # If there are no new ISs, there is actually no need to re-conduct layout computation.
        affects_computation_time = False
    else:
        # For each new IS, test where it's best to append it.
        for i in range(append_seq_length):
            seq_pool = GeneralFunctions.generateSequenceCombinations(
                previous_sequence, 1, extended_interaction_sessions,
                previous_slot_base_layout)
            if not use_heuristic_evaluator:
                (best_layout, best_fitness, best_seq, best_slot_base_layout,
                 best_slot_segments) = Layout.evaluateSequences(
                     seq_pool, data, fitness_cache, previous_slot_base_layout,
                     previous_slot_segments)
            else:
                modified_interaction_sessions = [
                    data['interaction_sessions'][len(seq_pool[0]) - 1]
                ]
                (best_layout, best_fitness, best_seq, best_slot_base_layout,
                 best_slot_segments) = Layout.evaluateSequencesUsingHeuristics(
                     seq_pool, data, fitness_cache, previous_slot_base_layout,
                     previous_slot_segments, modified_interaction_sessions)
            # Update the previous layout info.
            previous_sequence = best_seq
            previous_slot_base_layout = best_slot_base_layout
            previous_slot_segments = best_slot_segments
            # Add the assigned IS to the extended ISs.
            extended_interaction_sessions.append(
                data['interaction_sessions'][len(previous_sequence) - 1])

    print "best sequence: %s, best fitness: %d" % (best_seq, best_fitness)
    return best_layout, best_seq, best_slot_base_layout, best_slot_segments, affects_computation_time
def hid_cdf(data,
            hts,
            species,
            z_resolution=1.0,
            pick=None,
            z_ind=0,
            mask=None):
    # Build the vertical HID CDF (cumulative frequency per height level) used for bar plots
    delz = hts[1] - hts[0]
    if np.mod(z_resolution, delz) != 0:
        print('Need even multiple of vertical resolution: {:.1f}'.format(delz))
        return
    hold = deepcopy(data)

    if mask is not None:
        #        print 'maskind HID data'
        hold[mask] = -1

    multiple = int(z_resolution / delz)

    # loop thru the species and just call the vertical hid volume
    all_vols = []
    for sp in range(len(species)):
        #print sp
        htsn, tdat = GF.vertical_hid_volume(hold,
                                            hts,
                                            delz, [sp + 1],
                                            z_resolution=z_resolution,
                                            pick=pick,
                                            z_ind=0)  # need the +1
        all_vols.append(tdat)

    all_vols = np.array(all_vols)
    all_cdf = np.zeros_like(all_vols)
    #9        print np.shape(all_vols)
    #3    print np.min(all_vols)
    # shape is 10,16, which is nspecies x nheights
    # need to do cdf on each level
    all_vols[np.isnan(all_vols)] = 0.0
    #    print np.max(all_vols)
    for iz in range(all_vols.shape[1]):
        # loop thru the vertical
        #        print all_vols[:,iz]
        #        print iz
        level_cum_vol = np.cumsum((all_vols[:, iz]))
        #        if level_cum_vol[-1] != 0:
        all_cdf[:, iz] = 100.0 * level_cum_vol / level_cum_vol[-1]
#        else:
#            all_cdf[:, iz] = 100.0*level_cum_vol/1.

#    all_cdf[np.isnan(all_cdf)] = 0.0
#    print np.max(all_cdf)
    return htsn, all_cdf  #, all_vols
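
A compact numpy sketch (made-up volumes) of the per-level normalisation done in the loop above: cumulative sums down the species axis, expressed as percentages of each level's total.

import numpy as np

vols = np.array([[2., 1.], [3., 1.], [5., 2.]])  # species x heights
cdf = 100.0 * np.cumsum(vols, axis=0) / vols.sum(axis=0)
print(cdf)  # the last row is 100% at every height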
Example #20
def optimize_lw_rdk(params, rh, tk, lw):

    c1 = params[0]
    c2 = params[1]
    c3 = params[2]

    lw_mod = _lw_rdk([c1, c2, c3], rh, tk)

    err = GF.RMSE(lw_mod, lw)

    return err
Example #21
def subset_travel_time_table(treatment_area,
                             patient_plzs=None,
                             hospitals=None,
                             export_path="../Intermediate_Files/",
                             from_scratch=False,
                             dropna=True):
    """
    treatment_area: string describing treatment area for the output file name
    patient_plzs=None: list-like of plzs (np.int64) to look for (not yet validated)
    hospitals=None: list-like structure of hospital IDs to look for
    export_path: folder the subset table is written to
    from_scratch=False: calculate from raw data (True), or load from existing file (False)
    dropna=True: boolean option to drop rows containing missing values

    Return a data frame of a subset of the full travel time calculations, based
    on which plzs and hospitals are relevant to the analysis
    """

    # check if data is already saved
    file_name = f"{treatment_area}_tt_plz_table"
    file_ext = "csv"
    file_path = "../Intermediate_Files/"
    if not from_scratch:
        file = GF.check_for_saved(file_path + file_name + "." + file_ext)
        if isinstance(file, pd.DataFrame): return file

    travel_time_all_TA = GF.check_for_saved(
        "../Intermediate_Files/tt_plz_full_lookup_table.csv")

    print("reducing travel time table")
    travel_time_selected_TA = travel_time_all_TA.loc[patient_plzs, hospitals]

    if dropna:
        travel_time_selected_TA.dropna(inplace=True)

    GF.export_data_to_file(travel_time_selected_TA,
                           filename=file_name,
                           path=export_path,
                           ftype=file_ext,
                           overwrite=True)

    return travel_time_selected_TA
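
A minimal pandas sketch (made-up plzs and hospital IDs) of the .loc subsetting this function wraps.

import pandas as pd

tt_all = pd.DataFrame({"hosp_a": [12., 30.], "hosp_b": [22., 8.]},
                      index=[10115, 80331])
print(tt_all.loc[[10115], ["hosp_b"]])  # rows = patient plzs, columns = hospitals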
def uniqueEntry(Array):
    try:
        col = len(Array[0])
        ranges = []
        for k in range(col):
            ranges.append(
                uniqueList(GF.SelectColumnsList(columnIndex=[k], list=Array)))

        return ranges
    except Exception as e:
        logging.exception(e)
        raise
def p2o(p):
    """
    Function to convert between air pressure and VO2 max -- using
    the regression equation of Bailey (2001)
    """
    frac = 0.2095  # Volume fraction of O2 in the atmopshere
    scalar = 1.33322  # mmHg --> hPa
    satvp = GF.satVpBolton(37.0) / 100.
    pio = frac * (p - satvp)  # partial pressure of oxygen in inspired air
    vo2max = (np.log(pio * (1 / scalar)) - 3.25) / 0.0308

    return vo2max
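
A worked example (not from the source) that inlines the same regression, assuming a saturation vapour pressure of roughly 62.9 hPa at body temperature.

import numpy as np

frac, scalar, satvp = 0.2095, 1.33322, 62.9
for p in (1013.25, 700.0):  # roughly sea level vs. ~3000 m, in hPa
    pio = frac * (p - satvp)
    print(p, (np.log(pio * (1 / scalar)) - 3.25) / 0.0308)  # about 57 and 44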
Example #24
def points_within_km(target_lon, target_lat, ref_lon, ref_lat, thresh):
    """
    Designed to take a target point and find all those locations within
    thresh km from it.

    Returns array [ref_lon, ref_lat] where dist is below thresh
    """
    dists = GF.haversine_fast(target_lat, target_lon, ref_lat, ref_lon, miles=False)
    ind = dists < thresh
    return np.column_stack((ref_lon[ind], ref_lat[ind]))
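
A self-contained sketch of the same filtering with a plain haversine, since GF.haversine_fast is project-specific; the coordinates and threshold are made up.

import numpy as np

def haversine_km(lat1, lon1, lat2, lon2):
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371.0 * np.arcsin(np.sqrt(a))

ref_lon = np.array([13.40, 11.58])  # Berlin, Munich
ref_lat = np.array([52.52, 48.14])
dists = haversine_km(50.94, 6.96, ref_lat, ref_lon)  # distances from Cologne
print(np.column_stack((ref_lon[dists < 500.0], ref_lat[dists < 500.0])))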
def SWC_Processing1d(filename):
    """
    Convert a .swc file to a .vtp file
    Note that this assumes a particular order and meaning of the columns
    """
    print("Converting swc file.")
    newname = filename[:-3] + "vtp"
    with open(filename) as f:
        dataset = [text.split() for text in f]

    number = [int(line[0]) for line in dataset]
    colour = [int(line[1]) for line in dataset]
    positions = [[float(line[2]),
                  float(line[3]),
                  float(line[4])] for line in dataset]
    radius = [float(line[5]) for line in dataset]
    connection = [int(line[6]) for line in dataset]

    links = [[] for node in range(0, len(number))]
    for i in range(0, len(number)):
        if connection[i] > 0:
            nodenumber = number[i] - 1
            linkedto = connection[i] - 1
            links[nodenumber].append(linkedto)
            links[linkedto].append(nodenumber)

    nodes = [Node() for node in range(0, len(number))]
    for index, rad in enumerate(radius):
        nodes[index].SetRadius(rad)
    for index, pos in enumerate(positions):
        nodes[index].SetPosition(pos)
    for index, c in enumerate(colour):
        nodes[index].SetMajorVesselID(c)

    for index, link in enumerate(links):
        for con in link:
            nodes[index].AddConnection(nodes[con])

    patient = PatientModule.Patient()
    patient.Topology.Nodes = nodes
    patient.Topology.AnatomyToVessels()
    vesseltype = [
        colour[patient.Topology.Nodes.index(vessel.Nodes[1])]
        for vessel in patient.Topology.Vessels
    ]
    patient.Topology.VesselAtlas = vesseltype

    pos = [node.Position for node in patient.Topology.Nodes]
    meanx = numpy.mean([p[0] for p in pos])
    meany = numpy.mean([p[1] for p in pos])
    meanz = numpy.mean([p[2] for p in pos])
    centermatrix = GeneralFunctions.TMatrix(1, [90, 0, 0],
                                            [-meanx, -meany, -meanz])
    patient.Topology.ApplyTransformation(centermatrix)

    patient.Topology.TopologyToVTP(newname)
    return patient
Example #26
def getCounters(ut,Rest,name):
    while True:
        try:
            # ------------------------------------------------------------------------------
            # Query counters
            # ------------------------------------------------------------------------------
            LOG.info("#" * GLOBAL.NCHAR)
            LOG.info("GETTING  COUNTERS")
            LOG.info("#" * GLOBAL.NCHAR)

            start_time = datetime.datetime.utcnow()

            COUNTER_VG="COUNTERS"

            try:
                counters = Rest.getVarGroupsValue(COUNTER_VG)
                fileCountersname = "%s_%s_COUNTERS_%s.json" % (ut, name, GLOBAL.NOW_STR)
                fileCountersnamefull = "%s/%s" % (SETTINGS.NPATH559, fileCountersname)
                if counters.status_code == 200:
                    with open(fileCountersnamefull, "wb") as f:
                        f.write(counters.text)
                # ------------------------------------------------------------------------------
                # ZIP the counters file and move it to the upload folder
                # ------------------------------------------------------------------------------
                if counters.status_code == 200 and GeneralFunctions.getarchivesize(fileCountersnamefull) > 0:
                    GeneralFunctions.ZipandMovetoUpload(fileCountersname, fileCountersnamefull, SETTINGS.NPATH559, SETTINGS.ZPATH559)

            except requests.exceptions.RequestException as e:
                LOG.warning("ERROR REQUESTS: %s" % str(e))
                raise
            except Exception:
                LOG.error("ERROR getting counters")
                raise
        except:
            pass
        # ------------------------------------------------------------------------------
        # Keep the polling period
        # ------------------------------------------------------------------------------
        GeneralFunctions.keepperiod(10, start_time)
def ReadConfigToDict(sectionName, convertParseTo='string', hasComment=False):
    try:

        appDirectory = GF.GetRootDirectory()
        file_Address_Private_Config = GF.GetAddressTo(main=appDirectory,
                                                      folderName="Config",
                                                      fileName="Private",
                                                      extension="cnf")
        file_Address_Public_Config = GF.GetAddressTo(main=appDirectory,
                                                     folderName="Config",
                                                     fileName="Public",
                                                     extension="cnf")

        if GF.IsFileExist(file_Address_Private_Config):
            cnf = ReadDatabaseRaw(address=file_Address_Private_Config,
                                  sectionName=sectionName,
                                  hasComment=hasComment)

        elif GF.IsFileExist(file_Address_Public_Config):
            cnf = ReadDatabaseRaw(address=file_Address_Public_Config,
                                  sectionName=sectionName,
                                  hasComment=hasComment)

        else:
            raise Exception("Couldn't find config file")

        parsed = {}
        if convertParseTo == 'string':
            for key, val in cnf.items():
                parsed[key] = str(val)

        elif convertParseTo == 'float':
            for key, val in cnf.items():
                parsed[key] = float(val)

        return parsed

    except Exception as e:
        logging.exception(e)
        raise
Example #28
def dumpTableSetCSV(INFO, TableName, AddressMain, DBName=None):
    try:
        mySQLDB = connectDB(INFO, DBName)

        # Dump the main table and its _Out and _Shadow companions to CSV files.
        for suffix in ["", "_Out", "_Shadow"]:
            tableName = str(TableName) + suffix
            query = "SELECT * FROM " + tableName

            A1 = GF.GetAddressTo(AddressMain,
                                 "CSV",
                                 fileName=tableName + "_" +
                                 GF.getDateandTimeUTC(),
                                 extension="csv")
            results = pandas.read_sql_query(query, mySQLDB)
            results.to_csv(A1, index=False)

    except Exception as e:
        logging.exception(e)
        raise
    def __init__(self, DBInfo):
        try:
            logging.info("T-Matrix method started.")
            TMatrix_DB_Main_TableName = 'Raw_V1'
            DB = MySQLManagement(DBInfo)
            TMatrix_DB_Main_Column_Name, TMatrix_DB_Main_Data_Full = DB.ReadAllRowsfromTable(
                TableName=TMatrix_DB_Main_TableName)
            self.__TMatrix_DB_Main_Column_Name = GF.SelectColumnsList(
                columnIndex=[1, 2, 3, 4, 5],
                list=TMatrix_DB_Main_Column_Name,
                dimension=1)
            self.__TMatrix_DB_Main_SCT_Coeff_Full = GF.SelectColumnsList(
                columnIndex=[7], list=TMatrix_DB_Main_Data_Full)
            self.__TMatrix_DB_Main_ABS_Coeff_Full = GF.SelectColumnsList(
                columnIndex=[8], list=TMatrix_DB_Main_Data_Full)
            self.__TMatrix_DB_Main_Data_Full = GF.SelectColumnsList(
                columnIndex=[1, 2, 3, 4, 5], list=TMatrix_DB_Main_Data_Full)
            self.__TMatrix_DB_Main_Unique_Values = FN.uniqueEntry(
                self.__TMatrix_DB_Main_Data_Full)

        except Exception as e:
            logging.exception(e)
            raise
Example #30
def ta_to_TT(ta=None):
    """
    from a treatment area name, load the file for TT_selected_ta for the treatment area
    """
    if ta == "Dummy":
        path = "../Intermediate_Files/tt_plz_full_lookup_table_dummy.csv"

    elif subset_travel_time or not ta:
        if not isinstance(ta, str):
            ta = ta_to_name(ta)
        path = f"../Intermediate_Files/{ta}_tt_plz_table.csv"
    else:
        path = "../Intermediate_Files/tt_plz_full_lookup_table.csv"

    return GF.check_for_saved(path)
    def TMatrixInterpolator(self, FullMainDB, MainDBUniques, ABS_MainDB,
                            SCA_MainDB, TargetArray):
        try:

            Tolerance = [1, 3, 2, 2, 2]
            ABS_CS = []
            SCA_CS = []
            refinedInput, Area = self.TmatrixRefiner(TargetArray)
            for i in range(len(refinedInput)):
                input = refinedInput[i][:]
                XXX, index = FN.getToleratedArray(Array=FullMainDB,
                                                  Input=input,
                                                  Tolerance=Tolerance,
                                                  uniques=MainDBUniques)
                ABS_Coeff_Full = GF.SelectColumnsList(index,
                                                      ABS_MainDB,
                                                      dimension=1)
                SCA_Coeff_Full = GF.SelectColumnsList(index,
                                                      SCA_MainDB,
                                                      dimension=1)
                ABS_Interpolated = griddata(XXX,
                                            ABS_Coeff_Full,
                                            input,
                                            rescale=True)
                SCA_Interpolated = griddata(XXX,
                                            SCA_Coeff_Full,
                                            input,
                                            rescale=True)
                ABS_CS.append(Decimal(ABS_Interpolated[0]) * Area[i])
                SCA_CS.append(Decimal(SCA_Interpolated[0]) * Area[i])

            return ABS_CS, SCA_CS

        except Exception as e:
            logging.exception(e)
            raise
def plot_hid_cdf(data, hts, rconf=None, ax=None, pick=None):
    # Plot the HID CDF profile as stacked horizontal bars
    if rconf is None:
        print("sorry, need rconf to run properly")
        return
    #print np.shape(data)
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    else:
        fig = ax.get_figure()

    fig.subplots_adjust(left=0.07, top=0.93, right=0.8, bottom=0.1)

    for i, vl in enumerate(hts):
        #print vl,i
        #            print self.data[self.z_name].data[vl]
        #print data[0,:]
        #        print vl, rconf.hid_colors[1],data[0,i]
        ax.barh(vl,
                data[0, i],
                left=0.,
                edgecolor='none',
                color=rconf.hid_colors[1])
        #        print vl

        for spec in range(1, len(rconf.species)):  # now looping thru the species to make bar plot
            #             print rconf.hid_colors[spec+1]
            #            print data[spec-1,i]
            #            print spec, data[spec,i], data[spec-1,i]
            if np.isnan(data[spec - 1, i]):
                print('shoot')
            ax.barh(vl, data[spec, i], left=data[spec - 1, i],
                    color=rconf.hid_colors[spec + 1], edgecolor='none')
    ax.set_xlim(0, 100)
    ax.set_xlabel('Cumulative frequency (%)')
    ax.set_ylabel('Height (km MSL)')
    # now have to do a custom colorbar?
    GF.HID_barplot_colorbar(
        rconf, fig)  # call separate HID colorbar function for bar plots

    return fig, ax
Example #33
	def POST(self):
		form = Inputs()
		if not form.validates():
			return render.index(form, HelpContext)
		else:
			#print form.d.Path, form['Analysis'].value, form['Discrimination function'].value, form['size'].value, form['Unit'].value
			try:
			
				if form['Discrimination function'].value == "limitsize":
					IntSize = int(form['size'].value)
				else:
					IntSize = 0
				
				if os.path.isdir(form.d.Path):
					#print RadioUnit
					#print form.d.Path
					FinalQuantity, FileQuantity, EndSize, TotalSize, EndList, Ending, Files, Skipt = DM.disk_monitor(
						form.d.Path.encode('latin-1'), 
						form['Analysis'].value, 
						form['Discrimination function'].value, 
						IntSize, 
						form['Unit'].value.lower()
						)
					
					#print FinalQuantity, FileQuantity, EndSize, TotalSize, EndList
					NewEndSize = GF.conversion_soft(EndSize)
					NewTotalSize = GF.conversion_soft(TotalSize)
					
					#print NewEndSize, NewTotalSize
					StrEndList = []
	
					for Item in EndList:
						TempSize = GF.conversion_soft(Item[1])
						aaa = Item[0]
						#print aaa
						TempStr = '<p><a href="file:///{0}">{0}</a>, Size: {1}</p>' .format(aaa,TempSize)
					
						StrEndList.append(TempStr)
					
					#StrEnd = "".join(StrEndList)
					#print StrEndList
					#print StrEnd
					
					#zzz = json.dumps(StrEndList)
			
					#print json.loads(zzz)
					return render.result(
						FinalQuantity, 
						FileQuantity, 
						NewEndSize, 
						NewTotalSize, 
						StrEndList,
						Ending,
						Files,
						Skipt
						)
						
				else:
					return render.index(form, HelpContext)
				
			except ValueError as e:
				print e
				return render.index(form, HelpContext)
Example #34
Discrimination = InputVerification[1][1]
Unit = InputVerification[2][1]

if "-help" in RootPath:
	for Item in HelpContext:
		print Item
	exit()
elif not(RootPath):
	Input = raw_input("Specify a valid path\n>")
	if os.path.isdir(Input):
		RootPath.append(Input)
	else:
		exit("No Path")

############################################################
#### Here begins the actual analysis using the inputs	####
############################################################



for Item in RootPath:

	FinalQuantity, FileQuantity, EndSize, TotalSize, EndList, Ending, Files, Skipt = disk_monitor(Item, Analysis, Discrimination, Size, Unit)
	
	print FinalQuantity, "Folders out of", FileQuantity, "Folders"
	print GF.conversion_soft(EndSize), "Out of", GF.conversion_soft(TotalSize)
	for Item in EndList:
		print Item[0], GF.conversion_soft(Item[1])
	print "Calculaton time:", Ending, "s"
	print "Total files: ", Files
	print Skipt, "folders skipped"
Example #35
def emotes(message, messageObj):

    if len(message) == 0:
        return['Syntax is wrong: \n example: $emote -add kappa -link "http://goo.gl/ZEX6KN"', messageObj.channel]

    if '-help' in message:
        return["Usage, $emote -add <name> -link <\"link of image\"> -list  for list  -del <\"name\">to delete\n Please insure that there are quotes around the link \n Please use https://goo.gl/ to shorten the url.", messageObj.channel]

    elif '-list' in message:
        jsonTxt = GenFun.retrieveUrl()
        jsonTxt = jsonTxt['Emotes']
        messageTxt = ''

        for x in jsonTxt.keys():
            messageTxt = messageTxt + str(x)[0].upper() + str(x)[1:len(x)] + '\n'

        return[messageTxt, messageObj.channel]

    else:
        if '-link' in message or '-add' in message and '-list' in message or '-del' in message:

            message = message.replace(" ", "")

            jokes = GenFun.retrieveUrl()
            tmpJokes = jokes
            jokes = jokes["Emotes"]

            if '-del' in message:
                emoteToDel = message[message.find('-del"')+len('-del"'):message.rfind('"', 0, len(message))].lower()
                if len(emoteToDel) == 0:
                    return["ERROR nothing to delete",messageObj.channel]

                for x in jokes.keys():

                    if jokes[x]['trigger'] == emoteToDel:
                        jokes.pop(x, None)

                        tmpJokes['Emotes'] = jokes
                        GenFun.store(json.dumps(tmpJokes, indent=4, separators=(',', ': ')))

                        return['Deleted successfully', messageObj.channel]

                return["ERROR Something went wrong",messageObj.channel]



            if message[len(message)-1] == '"':

                add = message[message.find("-add")+len("-add"):message.find("-link")]
                link = message[message.find("-link")+len('-link"'):len(message)-1]

                if jokes.has_key(add):
                    return["That emote already exists!", messageObj.channel]
                else:
                    jokes[add] = {'response':link, 'trigger':add.lower()}

                    tmpJokes['Emotes'] = jokes

                    GenFun.store(json.dumps(tmpJokes, indent=4, separators=(',', ': ')))

                    return ["Successful", messageObj.channel]
                print "*EMOTE*"


            else:
                return['Syntax is wrong: \n example: $emote -add kappa -link "http://goo.gl/ZEX6KN"', messageObj.channel]

        else:
            return['Syntax is wrong: \n example: $emote -add kappa -link "http://goo.gl/ZEX6KN"', messageObj.channel]
            pass