Example #1
 def add_distance_to_topic(self, table, rows, value):
   '''
   Add topic to topics table
   '''
   rows = npappend(rows, ('lda_id'))
   value = npappend(value, (self.lda_id))
   distance_id = self.insert_into_database(table, rows, value)
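
In every snippet on this page, npappend is numpy.append imported under an alias; Example #11 below shows the import explicitly. numpy.append returns a new array instead of mutating its argument, which is why each snippet rebinds the result. A minimal standalone sketch (not from any of the projects above):

from numpy import append as npappend

rows = npappend(['name'], ('lda_id'))   # ('lda_id') is a plain string, not a tuple
print(rows)                             # ['name' 'lda_id']: append flattens both inputs
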
Example #2
 def add_email(self, table, rows, value):
   '''
   Add email to email table
   '''
   rows = npappend(rows, ('dataset_id'))
   value = npappend(value, (self.dataset_id))
   self.email_id = self.insert_into_database(table, rows, value)
Example #3
def periodic_derivative(x, y, max_periods):
    plot = False
    Ns = len(x)
    b, a = signalbutter(8, 2.0 * max_periods / Ns)
    ymid = interp(x + 0.5 * (x[1] - x[0]), x, y, period=2 * np_pi)
    yder = diff(ymid) / diff(x)
    #yder = Ns/(max(x)-min(x))*fftpackdiff(y,1,Ns)
    yder_filt = deepcopy(yder)
    x_filt = deepcopy(x)
    x_filt = npappend(x_filt, x_filt[-1] + x_filt[1] - x_filt[0])
    yder_filt = signalfiltfilt(b, a, npappend(yder_filt, yder_filt[0]))
    if plot:
        plt.figure(1)
        plt.subplot(311)
        plt.plot(x, y)

        plt.subplot(312)
        plt.plot(x[0:-1], yder)

        plt.subplot(313)
        plt.plot(x_filt[0:-1], yder_filt)
        plt.show()
    return yder_filt


#x = numpy.array(range(100))
#y = numpy.sin(twopi*x/100)+numpy.sin(twopi*x/10)
#periodic_derivative(x,y,4)
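
The snippet above relies on aliased imports that the excerpt omits. A plausible import header matching the call sites in periodic_derivative — an assumption, not the original file's text:

from copy import deepcopy
from numpy import append as npappend, diff, interp, pi as np_pi
from scipy.signal import butter as signalbutter, filtfilt as signalfiltfilt
import matplotlib.pyplot as plt
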
Example #4
 def add_blob(self, table, rows, value):
   '''
   Add blob to blob table
   '''
   rows = npappend(rows, ('email_id', 'lda_id'))
   value = npappend(value, (self.email_id, self.lda_id))
   blob_id = self.insert_into_database(table, rows, value)
Example #5
 def add_dict(self, table, rows, value):
   '''
   Add topicwords to topicwords table
   '''
   rows = npappend(rows, ('lda_id'))
   value = npappend(value, (self.lda_id))
   return self.insert_into_database(table, rows, value)
Example #6
 def update_epidem(self,
                   days: int,
                   updates: tuple,
                   newsboard: list,
                   lockdown: int = 0,
                   zero_lock: bool = False,
                   early_action: bool = False,
                   intervention: bool = False,
                   vaccined: bool = False,
                   drugged: bool = False) -> None:
     '''Update'''
     if len(updates) == 5:
         for idx, val in enumerate(updates):
             x, y = self.lines[idx].get_data()
             x = npappend(x, days)
             y = npappend(y, val)
             self.lines[idx].set_data((x, y))
         day, cases = self.lines[2].get_data()
         if len(day) == 1:
             new_cases = 0
         else:
             new_cases = cases[-1] - cases[-2]
         x, y = self.lines[5].get_data()
         x = npappend(x, days)
         y = npappend(y, new_cases)
         self.lines[5].set_data((x, y))
         for idx, news in enumerate(newsboard):
             if idx < len(self.news_text):
                 self.news_text[idx].set_text(news)
             else:
                 self.news_text.append(
                     self.epidem_ax.text(.05,
                                         .95 - idx * 0.05,
                                         news,
                                         horizontalalignment='left',
                                         transform=self.epidem_ax.transAxes,
                                         fontweight='bold',
                                         color="#BF7F7FFF"))
         bgcolor = 0
         label_on = self.track_cb.get_status()
         for idx in range(len(self.lines)):
             self.lines[idx].set_visible(label_on[idx])
         if lockdown or (days < zero_lock and early_action):
             bgcolor += 0x3F0000
         elif intervention or early_action:
             bgcolor += 0x1F00
         if vaccined:
             bgcolor += 0x3F3F00
         if drugged:
             bgcolor += 0x3F
         bgcolor = "#" + "0" * (6 -
                                len(hex(bgcolor)[2:])) + hex(bgcolor)[2:]
         self.epidem_ax.set_facecolor(bgcolor)
         self.mypause(0.0005)
         self.epidem_ax.relim()
         self.epidem_ax.autoscale_view(True, True, True)
     return
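
The bgcolor arithmetic above adds per-event tints into one integer and left-pads its hex digits to six characters before prefixing '#'. A standalone sketch of that formatting step, including the shorter format-spec equivalent:

bgcolor = 0x3F                                   # e.g. only the drug tint is active
padded = "#" + "0" * (6 - len(hex(bgcolor)[2:])) + hex(bgcolor)[2:]
assert padded == f"#{bgcolor:06x}" == "#00003f"
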
Example #7
 def add_lda(self, table, rows, value):
   '''
   Add lda to lda table
   '''
   self.insert = self.check_lda()
   if self.insert:
     rows = npappend(rows, ('lda_settings_id', 'dataset_id'))
     value = npappend(value, (self.lda_settings_id, self.dataset_id))
     self.lda_id = self.insert_into_database(table, rows, value)
Example #8
        def _append(isotope):
            if kind in ('sniff', 'baseline', 'whiff'):
                if kind == 'sniff':
                    isotope._value = signal
                    # isotope.dirty = True

                isotope = getattr(isotope, kind)

            if kind == 'sniff':
                isotope._value = signal

            xs = npappend(isotope.xs, x)
            ys = npappend(isotope.ys, signal)
            isotope.xs = xs
            isotope.ys = ys
Example #9
    def add_radio_measured_params_notes(self, table, cols, notes):
        '''
        Add event to the radio_measured_params_notes table.

        :param table: name of database table
        :param cols: cols in database table that need to be added
        :param notes: list of notes (each note is a string)
        :type table: str
        :type cols: list
        :type notes: list
        '''
        for idx, note in enumerate(notes):  # loop over all notes
            cols_i = npappend(cols[idx], ('rmp_id', 'last_modified', 'author'))
            value_i = npappend(note, (self.rmp_id, self.authortime,
                                      self.authorname))
            self.insert_into_database(table, cols_i, value_i)
Example #10
    def add_observations(self, table, cols, value):
        '''
        Add event to the observations table.

        :param table: name of database table
        :param cols: cols in database table that need to be added
        :param value: values to be set for the cols
        :type table: str
        :type cols: list
        :type value: list
        '''
        cols = npappend(cols, ('frb_id', 'author_id'))
        value = npappend(value, (self.frb_id, self.author_id))
        # try to insert into database / return observation id
        self.obs_id = self.insert_into_database(table, cols, value)
        # update database if type is supersedes
        self.update_database(table, cols, value)
Example #11
def load_PSZcatalog(unconf=False):
    from astropy.table import Table
    from numpy import append as npappend

    datapath = f'{os.environ["HOME"]}/Projects/planckClusters/catalogs'

    ps1 = Table.read(f'{datapath}/PSZ1v2.1.fits')
    ps2 = Table.read(f'{datapath}/PSZ2v1.fits')

    # convert to pandas
    df1 = ps1.to_pandas()
    df2 = ps2.to_pandas()

    if unconf:
        # only get unconfirmed sources
        df1 = df1.loc[df1['VALIDATION'] <= 3]
        df2 = df2.loc[df2['VALIDATION'] == -1]

    # clean up strings -- not required
    df1 = df1.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)
    df2 = df2.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)

    # merge the catalogs together
    df_m = df1.merge(df2,
                     how='outer',
                     left_on='INDEX',
                     right_on='PSZ',
                     suffixes=('_PSZ1', '_PSZ2'))

    # get the columns that we want
    cols = df_m.columns[[0, 1, 4, 5, 8, 29, 33, 34, 37, 38, 40, 51]]
    df_final = df_m[cols]

    # remerge to find bits that were missing
    df_final_bigger = df_final.merge(df2,
                                     how='left',
                                     left_on='INDEX_PSZ1',
                                     right_on='PSZ')
    # fill in nans
    for col in ['NAME', 'RA', 'DEC', 'SNR', 'REDSHIFT', 'INDEX']:
        df_final_bigger[col + '_PSZ2'] = \
            df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col])
    # fill in nans
    for col in ['NAME', 'RA', 'DEC', 'SNR', 'REDSHIFT', 'INDEX']:
        df_final_bigger[col + '_PSZ2'] = \
            df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col])
    for col in ['NAME', 'RA', 'DEC']:
        df_final_bigger[col] = \
            df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col + '_PSZ1'])

    df_final_bigger = \
        df_final_bigger[npappend(df_final_bigger.columns[:12].values, ['NAME',
                                                                   'RA',
                                                                   'DEC'])]

    return df_final_bigger
Example #12
 def add_topics(self):
   '''
   Insert all topics into database
   '''
   for idx in range(self.numtopics):
     try:
       topic_ids = npappend(
           topic_ids,
           self.add_topic('topic', nparray(['name']),
                          nparray([get_random_name(letters, 5)])))
     except NameError:
       topic_ids = self.add_topic('topic', nparray(['name']),
                                  nparray([get_random_name(letters, 5)]))
   return topic_ids
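
The try/except NameError above seeds topic_ids on the first iteration, when the name is still unbound, and appends on later ones. An equivalent sketch that makes the two cases explicit (the id values are hypothetical stand-ins for self.add_topic):

from numpy import append as npappend

topic_ids = None
for idx in range(3):
    new_id = 100 + idx                  # stand-in for self.add_topic(...)
    topic_ids = new_id if topic_ids is None else npappend(topic_ids, new_id)
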
Example #13
        def _append(isotope):
            if kind in ('sniff', 'baseline', 'whiff'):
                if kind == 'sniff':
                    isotope._value = signal
                    isotope.dirty = True

                isotope = getattr(isotope, kind)

            if kind == 'sniff':
                isotope._value = signal

            xs = npappend(isotope.xs, x)
            ys = npappend(isotope.ys, signal)
            isotope.xs = xs
            isotope.ys = ys
            # isotope.trait_setq(xs=xs, ys=ys)
            # isotope.xs = hstack((isotope.xs, (x,)))
            # isotope.ys = hstack((isotope.ys, (signal,)))
            isotope.dirty = True
Example #14
    def add_radio_measured_params(self, table, cols, value):
        '''
        Add event to the radio_measured_params table

        :param table: name of database table
        :param cols: cols in database table that need to be added
        :param value: values to be set for the cols
        :type table: str
        :type cols: list
        :type value: list
        '''
        rank = self.set_rank()
        cols = npappend(cols, ('rop_id', 'author_id', 'rank'))
        value = npappend(value, (self.rop_id, self.author_id, rank))
        ivorn = value[cols == 'voevent_ivorn'][0]
        self.event_exists = self.check_event_exists(ivorn)
        # add event to the database if it does not exist yet
        self.rmp_id = self.insert_into_database(table, cols, value)
        # update database if type is supersedes
        self.update_database(table, cols, value)
Example #15
 def add_words(self):
   '''
   Add all words to the dictionary table
   '''
   for word in self.randwords:
     rows = nparray(['word'])
     values = nparray([word])
     try:
       word_ids = npappend(word_ids, self.add_dict('dict', rows, values))
     except NameError:
       word_ids = self.add_dict('dict', rows, values)
   return word_ids
Example #16
    def add_radio_observations_params(self, table, cols, value):
        '''
        Add event to the radio_observations_params table.

        :param table: name of database table
        :param cols: cols in database table that need to be added
        :param value: values to be set for the cols
        :type table: str
        :type cols: list
        :type value: list
        '''
        # create settings_id if we don't have one yet
        if 'settings_id' not in cols:
            settings_id2 = str(nparray(value)[nparray(cols) == 'raj'][0]
                               ) + ';' + str(
                                 nparray(value)[nparray(cols) == 'decj'][0])
            settings_id = self.settings_id1 + ';' + settings_id2
        cols = npappend(cols, ('obs_id', 'author_id', 'settings_id'))
        value = npappend(value, (self.obs_id, self.author_id, settings_id))
        self.rop_id = self.insert_into_database(table, cols, value)
        # update database if type is supersedes
        self.update_database(table, cols, value)
Example #17
p_0_kmpfit_Norm.append(round(continuum_error_norm, 3))   # Zero level
Dictionary_Conditions.append({})

print('Dictionary_Conditions', Dictionary_Conditions)

fitobj.parinfo = Dictionary_Conditions

try:
    fitobj.fit(params0 = p_0_kmpfit_Norm)
except Exception as mes:
    print("Something wrong with fit: ", mes)
    raise SystemExit

p1_Norm_kmpfit = fitobj.params[0:-1]
p1_kmpfit = npappend(Rescale_GaussianParameters(p1_Norm_kmpfit, A_Values, mu_Values, Number_Gaussians), Continuum_error)

print('p1       ', p_1)
print('p1 kmpfit', p1_kmpfit, type(p1_kmpfit))

#---------Making the plot----------
Fig     = plt.figure(figsize = (16, 10))
Axis    = Fig.add_subplot(111)
Axis.set_xlabel('Wavelength',fontsize=15)
Axis.set_ylabel('Flux', fontsize=15)
Fig.set_facecolor('w')
Axis.plot(Wave, Flux, color='Blue', label="Spectrum")
Axis.plot(Wave_Gaussian, Flux_Gaussian, color='Red',  label="Fitted Gaussian mixture")
Axis.plot(mu_Values, A_Values, 'o', color='orange',  label="Maxima initial guess")
Axis.fill_between(Wave_Region, Continuum_Region, Flux_Region, color='grey', alpha=0.25, label='SelectionRegion')
Example #18
def plot():
    plt.plot(city[:, 0], city[:, 1], 'ro')
    index = mRoute[:, aDistance.argmin()]
    plt.plot(city[npappend(index, index[0]), 0],
             city[npappend(index, index[0]), 1])
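
Here npappend(index, index[0]) appends the first city back onto the route so the plotted tour closes its loop (city, mRoute, and aDistance are module-level arrays in the original project). A minimal standalone sketch:

import numpy as np

route = np.array([2, 0, 1])
closed = np.append(route, route[0])     # [2 0 1 2]: the path returns to its start
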
Example #19
    def __add__(self, indiv: person):
        '''add individuals in population'''
        self.pop_size += 1

        # Append numpy arrays
        self.active = npappend(self.active, indiv.active)
        self.recovered = npappend(self.recovered, indiv.recovered)
        self.susceptible = npappend(self.susceptible, indiv.susceptible)
        self.health = npappend(self.health, indiv.health)
        self.support = npappend(self.support, indiv.support)
        self.comorbidity = npappend(self.comorbidity, indiv.comorbidity)
        self.progress = npappend(self.progress, indiv.progress)
        self.move_per_day = npappend(self.move_per_day, indiv.move_per_day)
        if indiv.strain:
            self.cfr = npappend(self.cfr, indiv.strain.cfr)
            self.inf_per_day = npappend(self.inf_per_day,
                                        indiv.strain.inf_per_day)
        else:
            self.cfr = npappend(self.cfr, 0)
            self.inf_per_day = npappend(self.inf_per_day, 0)
        if indiv.strain in self.strain_types:
            idx = self.strain_types.index(indiv.strain)
        else:
            self.strain_types.append(indiv.strain)
            idx = len(self.strain_types) - 1
        self.strain = npappend(self.strain, idx)
        self.rms_v = npappend(self.rms_v, indiv.rms_v)
        self.home = npappend(self.home,
                             nparray(indiv.home, dtype=npint64).reshape(
                                 (1, 2)),
                             axis=0)
        # returning self (rather than None) keeps `pop = pop + indiv` usable
        return self
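
Unlike the 1-D appends elsewhere on this page, self.home is a 2-D array of (x, y) pairs, so the new row is reshaped to (1, 2) and appended with axis=0; without an explicit axis, numpy.append would flatten both arrays. A minimal sketch:

import numpy as np

home = np.zeros((0, 2), dtype=np.int64)
home = np.append(home, np.array([3, 4], dtype=np.int64).reshape((1, 2)), axis=0)
print(home.shape)                       # (1, 2)
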
Example #21
def solution(*, initial_conditions, solver_config, save_data, folder_name):
    """
    Compute the solution to the model given the input initial conditions and solver configuration
    Saves the solution file.
    :return: List of lists: solution, times, internal_data object
    """
    mp.dps = 50

    # Initialise the time over which to solve the system
    time = linspace(solver_config.start, solver_config.stop,
                    int(solver_config.num_time))
    time_length_passed = solver_config.num_time

    # Compute the solution
    if (not solver_config.enable_taylor_jump
            and not solver_config.enable_tstop):  # No Taylor jump or tstop
        print("Solving the ODE system!")
        soln, internal_data = calc_numerical_solution(
            initial_conditions=initial_conditions,
            solver_config=solver_config,
            time=time,
            tstop=0,
        )
        soln_y = soln.values.y
        soln_t = soln.values.t
        soln_flag = [soln.flag]
        jump_times = []

    elif (solver_config.enable_taylor_jump
          or solver_config.enable_tstop):  # Either Taylor jumps or tstops

        # Checks to make sure everything required for either tay jumps or tstops is present.
        if solver_config.enable_tstop:
            if len(initial_conditions.after_tstop_params) != len(
                    solver_config.tstop_times):
                raise RuntimeError(
                    "Input Error: Number of tstops ({}) != number of new params ({})"
                    .format(len(solver_config.tstop_times),
                            len(initial_conditions.after_tstop_params)))
            if solver_config.tstop_times[-1] > solver_config.stop:
                raise RuntimeError(
                    "Input Error: Selected to use tstops after simulation will end"
                )
        if solver_config.enable_taylor_jump:
            if solver_config.taylor_jump_threshold <= 0:
                raise RuntimeError(
                    "Input Error: Selected non-positive ({}) taylor jump threshold"
                    .format(solver_config.taylor_jump_threshold))

        # Initialise the data objects to solve the solution where we can jump several times
        soln_y = None
        soln_t = None
        soln_flag = None
        internal_data = None
        jump_times = []

        # Set the first round of initial conditions for the ode
        current_soln_initial_conditions = deepcopy(initial_conditions)

        # Create the tstops and taylor jumps
        tstop_times_remaining = solver_config.tstop_times.copy()
        tstop_times_remaining.append(
            0
        )  # Add a zero so that the final run will not stop until specified end time
        tay_jumps_remaining = solver_config.taylor_jumps_num

        print("Solving the ODE system!")
        while True:

            # Compute solution
            current_soln, current_internal_data = calc_numerical_solution(
                initial_conditions=current_soln_initial_conditions,
                solver_config=solver_config,
                time=time,
                tstop=tstop_times_remaining[0],
            )

            # save solution
            if soln_y is None:
                soln_y = current_soln.values.y
                soln_t = current_soln.values.t
                soln_flag = [current_soln.flag]
                internal_data = current_internal_data

            else:
                soln_y = concatenate((soln_y, current_soln.values.y))
                soln_t = concatenate((soln_t, current_soln.values.t))
                soln_flag = soln_flag + [current_soln.flag]
                internal_data = internal_data + current_internal_data

            # Check why the solver stopped.
            if current_soln.flag == StatusEnum.SUCCESS:  # Solver Completed
                print("Solver succeeded.")
                break

            elif (current_soln.flag == StatusEnum.ROOT_RETURN
                  ):  # Solver found root, i.e. theta double dot singularity
                print("Solver: attempting to preform Taylor jump procedure.")
                if not solver_config.enable_taylor_jump:
                    print(
                        "Solver stopped because it found the root but Taylor jump is disabled"
                    )
                    break
                else:
                    if tay_jumps_remaining != 0:
                        print("Performing taylor jump at {}".format(
                            current_soln.values.t[-1]))
                        jump_amount = get_jump_amount(
                            x=current_soln.values.y[-1, ODEIndex.x],
                            xdot=current_soln.values.y[-1, ODEIndex.xdot],
                            y=current_soln.values.y[-1, ODEIndex.y],
                            ydot=current_soln.values.y[-1, ODEIndex.ydot],
                            override=
                            False,  # can override with manual number. e.g. 3e-3
                        )
                        post_jump_time, post_jump_sol = calc_xy_singularity(
                            current_sol=current_soln,
                            jump_amount=jump_amount,
                            system_config=initial_conditions,
                        )
                        # Save the jump data
                        soln_y = concatenate(
                            (soln_y, asarray([post_jump_sol])), axis=0)
                        soln_t = npappend(soln_t, post_jump_time)
                        soln_flag = soln_flag + [current_soln.flag]
                        jump_times.append(
                            "Solver: Successfully performed Taylor jump at time {}"
                            .format(current_soln.values.t[-1]))
                        tay_jumps_remaining -= 1

                        # Check if we taylor jumped over the specified stopping time
                        if post_jump_time > solver_config.stop:
                            print("Solver: Jumped over the wanted stop time")
                            break
                        # Check if Taylor series jumped over a tstop time.
                        if solver_config.enable_tstop:
                            if tstop_times_remaining != [0]:
                                if post_jump_time > tstop_times_remaining[0]:
                                    print(
                                        "Solver: Taylor series jumped over a tstop time. I will continue anyway."
                                    )
                        # Reset the initial conditions of the solver to the post-jump conditions
                        current_soln_initial_conditions.ode_init_con = post_jump_sol
                        time_length_passed = time_length_passed - len(
                            current_soln.values.t)
                        time = linspace(post_jump_time, solver_config.stop,
                                        time_length_passed)

                    else:
                        print(
                            "The solver wanted to preform a Taylor Jump but you run out of allowed Jumps. "
                            "Try increasing the amount of allowed jumps")
                        break

            elif (current_soln.flag == StatusEnum.TSTOP_RETURN
                  ):  # Solver got to the required stop time.
                print("Solver: attempting to preform tstop procedure.")
                if not solver_config.enable_tstop:
                    print(
                        "Solver wanted to stop because it reached tstop but tstop is disabled."
                    )
                # Change the initial conditions for the new run
                current_soln_initial_conditions.ode_init_con = current_soln.values.y[
                    -1, :]
                time_length_passed = time_length_passed - len(
                    current_soln.values.t)
                time = linspace(current_soln.values.t[-1], solver_config.stop,
                                time_length_passed)
                # Update the initial conditions that the solver will call after tstop.
                print("vals going in are", current_soln_initial_conditions)
                for key, val in current_soln_initial_conditions.after_tstop_params[
                        0].items():
                    # Set the new initial conditions to be the updated initial conditions
                    print("key, val", key, val)
                    print("inital ones ", getattr(initial_conditions, key))
                    print("thing before",
                          getattr(current_soln_initial_conditions, key))
                    if key == "ode_init_con":
                        setattr(
                            current_soln_initial_conditions,
                            key,
                            val *
                            getattr(current_soln_initial_conditions, key),
                        )
                    else:
                        setattr(
                            current_soln_initial_conditions,
                            key,
                            val * getattr(initial_conditions, key),
                        )
                    print("thing after",
                          getattr(current_soln_initial_conditions, key))
                current_soln_initial_conditions.after_tstop_params.pop(0)
                # Change the tstop for the following run.
                if tstop_times_remaining == [0]:
                    print(
                        "Solver has completed all the required tstops. Will now compute the solution to end of time"
                    )
                else:
                    tstop_times_remaining.pop(0)
                    print("Solver: preformed the tstop procedure")

            elif current_soln.flag == StatusEnum.WARNING:
                print(
                    "The solver completed solving the solution but something unusable happened"
                )
                print("The message was {}".format(current_soln.message))

            elif current_soln.flag < 0:
                print("The solver failed.")
                print("Flag = {}".format(current_soln.flag))
                print("Message = {}".format(current_soln.message))
                break

            else:
                raise SystemExit("The solver returned a unknown flag. Exiting")

    else:
        print("Could not figure out how you wanted to solve the system.")
        raise SystemExit

    # Create and save the results file
    if save_data:
        if not isdir(folder_name):
            print("No directory '{}' existed. Creating directory.".format(
                folder_name))
            try:
                mkdir(folder_name)
            except OSError:
                print("Could not create the directory {}.".format(folder_name))
                print("Fix directory or set save data to False")
                raise SystemExit
            else:
                print("Directory created")

        create_results_file(
            Solution(
                solution=soln_y,
                times=soln_t,
                flags=soln_flag,
                tstop_times=solver_config.tstop_times,
                jump_times=jump_times,
            ),
            internal_data,
            initial_conditions,
            solver_config,
            folder_name,
        )

    return soln_y, soln_t, soln_flag, internal_data
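
Note how the solver loop grows its outputs: the 2-D solution rows go through concatenate along the first axis, while the scalar post-jump time goes through npappend, which is shorthand for growing a 1-D array. A minimal sketch of the distinction:

import numpy as np

soln_t = np.array([0.0, 0.1])
soln_t = np.append(soln_t, 0.15)                    # 1-D: append a scalar
soln_y = np.zeros((2, 3))
soln_y = np.concatenate((soln_y, np.ones((1, 3))))  # 2-D: stack a new row
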
Example #22
def heredity():
    global mRoute, mConbine
    # Cross, scheme 1 (crossover-position method)
    # for ii in range(nCross//2):
    #     fGene = np.zeros(size*2).astype(np.int)
    #     cGene0 = list()
    #     cGene1 = list()
    #     for ij in range(size):
    #         fGene[[ij*2, ij*2+1]] = [mRoute[:, aCross[ii]][ij],
    #                                  mRoute[:, aCross[ii+nCross//2]][ij]]
    #     for item in fGene:
    #         if (not item in cGene0):
    #             cGene0.append(item)
    #         else:
    #             cGene1.append(item)
    #     mRoute[:, aAban[ii*2]] = cGene0
    #     mRoute[:, aAban[ii*2+1]] = cGene1

    # Cross, scheme 2 (reworked for runtime efficiency; same principle as scheme 1)
    # Back up the individuals to be crossed so the next step cannot affect them
    # (some individuals are in both the to-cross and to-discard sets)
    # mCross[:, 0:nCross] = mRoute[:, aCross[0:nCross]]
    # # Negate all routes about to be replaced by crossover, for easier lookup later
    # mRoute[:, aAban[0:nCross]] -= size
    # for ii in range(nCross//2):
    #     # array pointers
    #     ptr0 = ptr1 = 0
    #     for ij in range(size):
    #         # first parent individual
    #         if (mCross[:, ii][ij] in mRoute[:, aAban[ii*2]]):
    #             mRoute[:, aAban[ii*2+1]][ptr1] = mCross[:, ii][ij]
    #             ptr1 += 1
    #         else:
    #             mRoute[:, aAban[ii*2]][ptr0] = mCross[:, ii][ij]
    #             ptr0 += 1
    #         # second parent individual
    #         if (mCross[:, ii+nCross//2][ij] in mRoute[:, aAban[ii*2]]):
    #             # boundary issue here
    #             mRoute[:, aAban[ii*2+1]][ptr1] = mCross[:, ii+nCross//2][ij]
    #             ptr1 += 1
    #         else:
    #             mRoute[:, aAban[ii*2]][ptr0] = mCross[:, ii+nCross//2][ij]
    #             ptr0 += 1

    # Scheme 3 (efficient; unlike schemes 1 and 2, crosses directly, then deduplicates and backfills)
    # pick positions
    position = np.random.randint(0, size, nCross // 2)
    # degree (number of crossover positions)
    level = np.random.randint(1, size, nCross // 2)
    # crossover position indices
    crossArea = np.array(
        [np.arange(x, x + y) % size for x, y in zip(position, level)])
    # merge the genes of the parents to be crossed
    mConbine[0:size, 0:nCross // 2] = mRoute[:, aCross[0:nCross // 2]]
    mConbine[size:size * 2, 0:nCross // 2] = mRoute[:,
                                                    aCross[nCross // 2:nCross]]
    entirety = np.arange(size * 2)
    for ii in range(nCross // 2):
        mConbine[[npappend(crossArea[ii], crossArea[ii] + size)],
                 ii] = mConbine[
                     [npappend(crossArea[ii] + size, crossArea[ii])], ii]
        # extract one duplicate-free set of routes
        _, index = np.unique(mConbine[:, ii], return_index=True)
        mRoute[:, aAban[ii * 2]] = mConbine[:, ii][np.sort(index)]
        # the remaining set of routes
        # indexLeft = entirety[np.isin(entirety, index, invert=True)]
        indexLeft = np.array(list(set(entirety) - set(index)))
        mRoute[:, aAban[ii * 2 + 1]] = mConbine[:, ii][np.sort(indexLeft)]

    # # Variation
    # # high-quality individuals mutate directly to replace the discarded ones
    # # mutation
    # # # pick positions
    # position0 = np.random.randint(0, size, nCloneWithVari)
    # # mutation degree (number of positions moved)
    # level = np.random.randint(1, size//3, nCloneWithVari)
    # position1 = np.array([nprandint(x+y, x+size-y) %
    #                       size for x, y in zip(position0, level)])
    # posSwap = np.array([(np.arange(x, x+y) % size, np.arange(y, y+z) % size)
    #                     for x, y, z in zip(position0, position1, level)])
    # mRoute[:, aAban[nCross:nCross+nCloneWithVari]
    #        ] = mRoute[:, aCloneWithVari[0:nCloneWithVari]]
    # for ii in range(nCloneWithVari):
    #     mRoute[:, aAban[ii+nCross]][(npappend(posSwap[ii][0], posSwap[ii][1]))
    #                                 ] = mRoute[:, aAban[ii+nCross]][(npappend(posSwap[ii][0], posSwap[ii][1]))]

    # # Method 2 (every new individual differs from its parent; converges more slowly than method 1)
    # pick positions
    position = np.random.randint(0, size, nCloneWithVari)
    # mutation degree (number of positions moved)
    level = np.random.randint(1, size, nCloneWithVari)
    posInsert = np.array([nprandint(0, size - x) for x in level])
    for ii in range(nCloneWithVari):
        # positions selected to shift
        posSelect = np.arange(position[ii], position[ii] + level[ii]) % size
        # vector of the remaining positions
        posLeft = np.arange(position[ii] + level[ii],
                            position[ii] + size) % size
        #
        mRoute[:,
               aAban[ii +
                     nCross]][0:posInsert[ii]] = mRoute[:, aCloneWithVari[ii]][
                         posLeft[0:posInsert[ii]]]

        mRoute[:, aAban[ii + nCross]][
            posInsert[ii]:posInsert[ii] +
            level[ii]] = mRoute[:, aCloneWithVari[ii]][posSelect]

        mRoute[:,
               aAban[ii + nCross]][posInsert[ii] +
                                   level[ii]:] = mRoute[:, aCloneWithVari[ii]][
                                       posLeft[posInsert[ii]:]]

    # Mutation
    # pick positions
    position0 = np.random.randint(0, size, nVari)
    # mutation degree (number of positions moved)
    level = np.random.randint(1, 5, nVari)
    position1 = np.array([
        nprandint(x + y, x + size - y) % size
        for x, y in zip(position0, level)
    ])
    posSwap = np.array([(np.arange(x, x + y) % size,
                         np.arange(y, y + z) % size)
                        for x, y, z in zip(position0, position1, level)])
    # mVari[:, 0:nVari] = mRoute[:, aVari[0:nVari]]
    for ii in range(nVari):
        # swap the two index blocks (the RHS is evaluated before assignment)
        mRoute[:, aVari[ii]][(npappend(
            posSwap[ii][0], posSwap[ii][1]))] = mRoute[:, aVari[ii]][(npappend(
                posSwap[ii][1], posSwap[ii][0]))]
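
Scheme 3's deduplication leans on np.unique(..., return_index=True): it reports the index of each value's first occurrence, and sorting those indices restores the original visiting order, so one valid child route falls out of the doubled gene column. A minimal sketch:

import numpy as np

combined = np.array([3, 1, 3, 2, 1, 0])
_, idx = np.unique(combined, return_index=True)
child = combined[np.sort(idx)]          # [3 1 2 0]: first occurrences, in order
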
Example #23
def simulate(city,
             logfile,
             simul_pop,
             med_eff: float = 0.,
             med_recov: float = 0,
             vac_res: float = 0,
             vac_cov: float = 0.,
             movement_restrict: int = 0,
             contact_restrict: int = 0,
             lockdown_chunk: int = 0,
             lockdown_panic: int = 1,
             seed_inf: int = 0,
             zero_lock: bool = False,
             intervention: bool = False,
             early_action=False,
             plot_h=None) -> tuple:
    '''Recursive simulation of each day'''
    vaccine_discovery_date = 0
    drug_discovery_date = 0
    while not vaccine_discovery_date:
        for idx, k in enumerate(nprandom.random(size=(int(5 /
                                                          MED_DISCOVERY)))):
            if k < MED_DISCOVERY:
                vaccine_discovery_date = idx
    while not drug_discovery_date:
        for idx, k in enumerate(nprandom.random(size=(int(5 /
                                                          MED_DISCOVERY)))):
            if k < MED_DISCOVERY:
                drug_discovery_date = idx
    lockdown = 0
    next_lockdown = seed_inf * lockdown_panic
    # Track infection trends
    track: nparray = nparray([[]] * 0, dtype=npint64).reshape((0, 5))
    days = 0
    args = city.survey(simul_pop)
    newsboard = [
        "Total Population %d" % city.pop_size,
        "ICUs Available: %d/%d" %
        (city.infrastructure - args[3], city.infrastructure),
    ]
    reaction = (zero_lock, early_action, intervention,
                days > vaccine_discovery_date, days > drug_discovery_date)
    plot_h.update_epidem(days, args, newsboard, lockdown, *reaction)
    track = npappend(track, nparray(args).reshape((1, 5)), axis=0)
    print(*args, file=logfile, flush=True)
    city.pass_day(plot_h)  # IT STARTS!
    while npany(city.space_contam):  # Absent from persons and places
        if days == vaccine_discovery_date:
            city.vaccine_resist = vac_res
            city.vaccine_cov = vac_cov
        if days == drug_discovery_date:
            for idx, pathy in enumerate(city.strain_types):
                if pathy is not None:
                    city.strain_types[idx].inf_per_day /= med_recov
                    city.strain_types[idx].cfr *= med_eff
                    city.inf_per_day /= med_recov
                    city.cfr *= med_eff
        if early_action:
            if not days:
                # Restrict movement
                city.rms_v //= movement_restrict
                city.move_per_day //= contact_restrict
            elif days == zero_lock:
                # End of initial lockdown
                city.rms_v *= movement_restrict
                city.move_per_day *= contact_restrict
        days += 1
        args = city.survey(simul_pop)
        # a list, so discovery news can be appended below
        newsboard = [
            "Total Population %d" % city.pop_size,
            "ICUs Available: %d/%d" %
            (city.infrastructure - args[3], city.infrastructure),
        ]
        if days > drug_discovery_date:
            newsboard.append("Drug Discovered")
        if days > vaccine_discovery_date:
            newsboard.append("Vaccine Discovered")
        track = npappend(track, nparray(args).reshape((1, 5)), axis=0)
        print(*args, file=logfile, flush=True)
        city.pass_day(plot_h)
        reaction = (zero_lock, early_action, intervention,
                    days > vaccine_discovery_date, days > drug_discovery_date)
        plot_h.update_epidem(days, args, newsboard, lockdown, *reaction)
        if intervention and lockdown == 0 and (args[2] > next_lockdown):
            next_lockdown *= lockdown_panic
            # Panic by infection Spread
            lockdown = 1
            city.rms_v //= movement_restrict
            city.move_per_day //= contact_restrict
        if intervention and lockdown:
            lockdown += 1
        if intervention and lockdown > lockdown_chunk + 1:
            # Business as usual
            city.rms_v *= movement_restrict
            city.move_per_day *= contact_restrict
            lockdown = 0
    args = city.survey(simul_pop)
    # a list, so discovery news can be appended below
    newsboard = [
        "Total Population %d" % city.pop_size,
        "ICUs Available: %d/%d" %
        (city.infrastructure - args[3], city.infrastructure),
    ]
    if days > drug_discovery_date:
        newsboard.append("Drug Discovered")
    if days > vaccine_discovery_date:
        newsboard.append("Vaccine Discovered")
    track = npappend(track, nparray(args).reshape((1, 5)), axis=0)
    print(*args, file=logfile, flush=True)
    reaction = (zero_lock, early_action, intervention,
                days > vaccine_discovery_date, days > drug_discovery_date)
    plot_h.update_epidem(days, args, newsboard, lockdown, *reaction)
    return
Example #24
def main():
    import time
    then = time.time()
    from numpy import append as npappend

    def prof():
        global RESP, INDEX_T_DAS, INDEX_T_MAP, SD, EXREC, MINIPH5, Das, SIZE,\
            ARRAY_T, RH, LAT, LON, F, TRACE_JSON, APPEND

        MINIPH5 = None
        ARRAY_T = {}

        def get_das(sd):
            #   Return line_station or das#[-9:]
            try:
                das = "{0}X{1}".format(
                    sd.reel_headers.extended_header_3.line_number,
                    sd.reel_headers.extended_header_3.receiver_point)
            except Exception:
                try:
                    das = "{0}X{1}".format(
                        sd.reel_headers.external_header.receiver_line,
                        sd.reel_headers.external_header.receiver_point)
                except Exception:
                    das = "sn" + \
                          str(sd.reel_headers.general_header_block_1.
                              manufactures_sn)
                    if das == "sn0":  # serial number missing/zero
                        das = "id" + \
                              str(sd.reel_headers
                                  .extended_header_1.id_number)[-9:]

            return das

        def get_node(sd):
            #   Return node part number, node id, and number of channels
            pn = None  # Part Number
            id = None  # Node ID
            nc = None  # Number of channel sets
            try:
                nc = sd.reel_headers.general_header_block_1[
                    'chan_sets_per_scan']
                pn = sd.reel_headers.extended_header_1['part_number']
                id = sd.reel_headers.extended_header_1['id_number']
            except Exception:
                pass

            return pn, id, nc

        try:
            get_args()
        except Exception as err_msg:
            LOGGER.error(err_msg)
            return 1

        initializeExperiment()
        LOGGER.info("segd2ph5 {0}".format(PROG_VERSION))
        LOGGER.info("{0}".format(sys.argv))
        if len(FILES) > 0:
            RESP = Resp(EX.ph5_g_responses)
            rows, keys = EX.ph5_g_receivers.read_index()
            INDEX_T_DAS = Rows_Keys(rows, keys)
            rows, keys = EX.ph5_g_maps.read_index()
            INDEX_T_MAP = Rows_Keys(rows, keys)

        for f in FILES:
            F = f
            traces = []
            TRACE_JSON = []
            try:
                SIZE = os.path.getsize(f)
            except Exception as e:
                LOGGER.error("Failed to read {0}, {1}.\
                 Skipping...\n".format(f, str(e.message)))
                continue

            SD = segdreader.Reader(infile=f)
            LAT = None
            LON = None
            RH = False
            if not SD.isSEGD(expected_manufactures_code=MANUFACTURERS_CODE):
                LOGGER.error(
                    "{0} is not a Fairfield SEG-D file. Skipping.".format(
                        SD.name()))
                continue

            try:
                SD.process_general_headers()
                SD.process_channel_set_descriptors()
                SD.process_extended_headers()
                SD.process_external_headers()
            except segdreader.InputsError as e:
                LOGGER.error("Possible bad SEG-D file -- {0}".format("".join(
                    e.message)))
                continue

            nleft = APPEND
            Das = get_das(SD)
            part_number, node_id, number_of_channels = get_node(SD)
            EXREC = get_current_data_only(SIZE, Das)
            LOGGER.info(":<Processing>: {0}\n".format(SD.name()))
            LOGGER.info("Processing: {0}... Size: {1}\n".format(
                SD.name(), SIZE))
            if EXREC.filename != MINIPH5:
                LOGGER.info("Opened: {0}...\n".format(EXREC.filename))
                LOGGER.info(
                    "DAS: {0}, Node ID: {1}, PN: {2}, Channels: {3}".format(
                        Das, node_id, part_number, number_of_channels))
                MINIPH5 = EXREC.filename

            n = 0
            trace_headers_list = []
            while True:
                if SD.isEOF():
                    if n != 0:
                        thl = []
                        chan_set = None
                        t = None
                        new_traces = []
                        for T in traces:
                            thl.append(T.headers)
                            if chan_set is None:
                                chan_set = T.headers.trace_header.channel_set
                            if chan_set == T.headers.trace_header.channel_set:
                                if isinstance(t, type(None)):
                                    t = T.trace
                                else:
                                    t = npappend(t, T.trace)
                            else:
                                new_traces.append(T)

                        traces = new_traces
                        process_traces(SD.reel_headers, thl[0], t)
                        if DAS_INFO:
                            writeINDEX()
                    break

                try:
                    trace, cs = SD.process_trace()
                except segdreader.InputsError as e:
                    LOGGER.error("{0}\n".format(F))
                    LOGGER.error("Possible bad SEG-D file -- {0}".format(
                        "".join(e.message)))
                    break

                if not LAT and not LON:
                    try:
                        if UTM:
                            #   UTM
                            LAT, LON = utmcsptolatlon(
                                SD.trace_headers.trace_header_N[4].
                                receiver_point_Y_final / 10., SD.trace_headers.
                                trace_header_N[4].receiver_point_X_final / 10.)
                        elif TSPF:
                            #   Texas State Plane coordinates
                            LAT, LON = txncsptolatlon(
                                SD.trace_headers.trace_header_N[4].
                                receiver_point_Y_final / 10., SD.trace_headers.
                                trace_header_N[4].receiver_point_X_final / 10.)
                        else:
                            LAT = SD.trace_headers.trace_header_N[
                                4].receiver_point_Y_final / 10.
                            LON = SD.trace_headers.trace_header_N[
                                4].receiver_point_X_final / 10.
                    except Exception as e:
                        LOGGER.warning(
                            "Failed to convert location: {0}.\n".format(
                                e.message))

                trace_headers_list.append(SD.trace_headers)
                if n == 0:
                    traces.append(Trace(trace, SD.trace_headers))
                    n = 1
                    Das = get_das(SD)
                else:
                    traces.append(Trace(trace, SD.trace_headers))

                if n >= nleft or EVERY is True:
                    thl = []
                    chan_set = None
                    chan_set_next = None
                    t = None
                    new_traces = []
                    # Need to check for gaps here!
                    for T in traces:
                        thl.append(T.headers)
                        if chan_set is None:
                            chan_set = T.headers.trace_header.channel_set
                        if chan_set == T.headers.trace_header.channel_set:
                            if isinstance(t, type(None)):
                                t = T.trace
                            else:
                                t = npappend(t, T.trace)
                        else:
                            new_traces.append(T)
                            if chan_set_next is None:
                                chan_set_next =\
                                    T.headers.trace_header.channel_set

                    traces = new_traces
                    process_traces(SD.reel_headers, thl[0], t)
                    if new_traces:
                        nleft = APPEND - len(new_traces)
                    else:
                        nleft = APPEND
                    chan_set = chan_set_next
                    chan_set_next = None
                    if DAS_INFO:
                        writeINDEX()
                    n = 0
                    trace_headers_list = []
                    continue

                n += 1

            update_external_references()
            if TRACE_JSON:
                log_array, name = getLOG()
                for line in TRACE_JSON:
                    log_array.append(line)

            LOGGER.info(":<Finished>: {0}\n".format(F))

        write_arrays(ARRAY_T)
        seconds = time.time() - then

        try:
            EX.ph5close()
            EXREC.ph5close()
        except Exception as e:
            LOGGER.warning("{0}\n".format("".join(e.message)))

        LOGGER.info("Done...{0:b}".format(int(seconds / 6.)))
        logging.shutdown()
Example #25
def load_PSZcatalog(unconf=False, full=False, extras=False, **kwargs):
    ''' Load the PSZ catalog data into a pandas dataframe. This is useful for
    getting the catalog data into other scripts in an easy way.

    By default, the script loads all *unique* entries in the combined PSZ1 and
    PSZ2 catalogs. The objects are updated to the PSZ2 values if they appear in
    both catalogs. This should be good enough for most applications where we
    want to include both confirmed and unconfirmed objects.

    Key options:

    unconf = True -- Gives *only* the unconfirmed objects in the PSZ catalogs
    full = True -- Gives the full catalogs instead of just the names and basic
                information
    extras = True -- Loads extra information from the Barrena et al. catalog
    and/or denotes where we have mosaic/newfirm imaging.

    **kwargs -- whether to load Barrena AND/OR our catalog. Options are
    `barrena = True` and `us = True`

    returns a pandas dataframe.

    '''

    datapath = f'{os.environ["HOME"]}/Projects/planckClusters/catalogs'

    ps1 = Table.read(f'{datapath}/PSZ1v2.1.fits')
    ps2 = Table.read(f'{datapath}/PSZ2v1.fits')

    # convert to pandas
    df1 = ps1.to_pandas()
    df2 = ps2.to_pandas()

    if unconf:
        # only get unconfirmed sources
        df1 = df1.loc[df1['VALIDATION'] <= 3]
        df2 = df2.loc[df2['VALIDATION'] == -1]

    # clean up strings -- not required
    df1 = df1.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)
    df2 = df2.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)

    # merge the catalogs together
    df_m = df1.merge(df2,
                     how='outer',
                     left_on='INDEX',
                     right_on='PSZ',
                     suffixes=('_PSZ1', '_PSZ2'))

    # get the columns that we want
    if full:
        df_final = df_m
    else:
        cols = df_m.columns[[0, 1, 4, 5, 8, 29, 33, 34, 37, 38, 40, 51]]
        df_final = df_m[cols]

    # remerge to find bits that were missing
    df_final_bigger = df_final.merge(df2,
                                     how='left',
                                     left_on='INDEX_PSZ1',
                                     right_on='PSZ')
    # fill in nans
    for col in ['NAME', 'RA', 'DEC', 'SNR', 'REDSHIFT', 'INDEX']:
        df_final_bigger[col + '_PSZ2'] = \
            df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col])
    # fill in nans
    for col in ['NAME', 'RA', 'DEC', 'SNR', 'REDSHIFT', 'INDEX']:
        df_final_bigger[col + '_PSZ2'] = \
                df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col])

    for col in ['NAME', 'RA', 'DEC', 'SNR']:
        df_final_bigger[col] = \
                df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[
                    col + '_PSZ1'])

    df_final_bigger = df_final_bigger[npappend(
        df_final_bigger.columns[:13].values, ['NAME', 'RA', 'DEC', 'SNR'])]

    if extras:
        df_final_bigger = load_extras(df_final_bigger, **kwargs)

    return df_final_bigger
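
A hedged usage sketch: the catalog paths are hard-coded inside the function, so this assumes the PSZ FITS files exist under $HOME/Projects/planckClusters/catalogs and that load_extras is importable from the same module.

df = load_PSZcatalog(unconf=True)
print(df[['NAME', 'RA', 'DEC', 'SNR']].head())
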
Example #26
    def write_combined_data_netcdf(self):
        ncfile = ncdf(self.outputfile, 'w', format='NETCDF4')
        # description of the file
        ncfile.description = 'Hobby meteorologists data ' + self.inputdir
        ncfile.history = 'Created ' + time.ctime(time.time())
        # create time dimension
        timevar = ncfile.createDimension('time', None)
        # create time variable local time Europe/Amsterdam
        timeaxisLocal = zeros(len(self.data[self.dateUTCstring][1:]))
        # define UTC and local time-zone (hardcoded)
        from_zone = tz.gettz('UTC')
        to_zone = tz.gettz('Europe/Amsterdam')
        # convert time string to datetime object
        for idx in range(1, len(self.data[self.dateUTCstring])):
            # define time object from string
            timeObject = datetime.strptime(self.data[self.dateUTCstring][idx],
                                           '%Y-%m-%d %H:%M:%S')
            # tell timeObject that it is in UTC
            timeObject = timeObject.replace(tzinfo=from_zone)
            # time axis UTC
            try:
              timeaxis = npappend(timeaxis, ncdf_date2num(
                timeObject.replace(tzinfo=None),
                units='minutes since 2010-01-01 00:00:00',
                calendar='gregorian'))
            except NameError:
              timeaxis = ncdf_date2num(
                timeObject.replace(tzinfo=None),
                units='minutes since 2010-01-01 00:00:00',
                calendar='gregorian')

        # netcdf time variable UTC
        timevar = ncfile.createVariable('time', 'i4', ('time',),
                                        zlib=True)
        timevar[:] = timeaxis
        timevar.units = 'minutes since 2010-01-01 00:00:00'
        timevar.calendar = 'gregorian'
        timevar.standard_name = 'time'
        timevar.long_name = 'time in UTC'

        # write lon/lat variables if available
        if ((self.lat) and (self.lon)):
            lonvar = ncfile.createDimension('longitude', 1)
            lonvar = ncfile.createVariable('longitude', 'float32',('longitude',))
            lonvar.units = 'degrees_east'
            lonvar.axis = 'X'
            lonvar.standard_name = 'longitude'
            lonvar[:] = self.lon
            latvar = ncfile.createDimension('latitude', 1)
            latvar = ncfile.createVariable('latitude', 'float32',('latitude',))
            latvar.units = 'degrees_north'
            latvar.axis = 'Y'
            latvar.standard_name = 'latitude'
            latvar[:] = self.lat
        # create other variables in netcdf file
        for self.variable in self.data.keys():
            if self.variable not in [self.dateUTCstring, 'Time', '<br>', None]:
                # add variables in netcdf file
                # convert strings to npnan if array contains numbers
                if True in [utils.is_number(c)
                            for c in self.data[self.variable]]:
                    self.data[self.variable] = [npnan if isinstance(
                        utils.fitem(c), str) else utils.fitem(c) for c in self.data[
                            self.variable]]
                # check if variable is a string
                if not isinstance(self.data[self.variable][1], str):
                    # fill variable
                    if self.variable == 'SolarRadiationWatts/m^2':
                        #variableName = 'SolarRadiation'
                        continue
                    elif ((self.variable == 'TemperatureC') or
                          (self.variable == 'TemperatureF')):
                        variableName = 'temperature'
                    else:
                        variableName = self.variable
                    self.values = ncfile.createVariable(
                        variableName, type(self.data[self.variable][1]),
                        ('time',), zlib=True, fill_value=-999)
                else:
                    # string variables cannot have fill_value
                    self.values = ncfile.createVariable(
                        self.variable, type(self.data[self.variable][1]),
                        ('time',), zlib=True)
                # TODO: km/h->m/s ??
                try:  # fill variable
                    if self.variable not in ['TemperatureC', 'TemperatureF']:
                      self.values[:] = self.data[self.variable][1:]
                    elif self.variable == 'TemperatureC':
                      self.values[:] = 273.15 + nparray(self.data[self.variable][1:])
                    elif self.variable == 'TemperatureF':
                      self.values[:] = (nparray(self.data[self.variable][1:]) - 32.)/1.8
                except IndexError:
                    # for strings the syntax is slightly different
                    self.values = self.data[self.variable][1:]
                self.fill_attribute_data()
        ncfile.close()