Example #1
 def _data_update(artists, workspace):
     # errorbar with workspaces can only return a single container
     container_orig = artists[0]
     # It is not possible to simply reset the error bars so
     # we have to plot new lines but ensure we don't reorder them on the plot!
     orig_idx = self.containers.index(container_orig)
     container_orig.remove()
     # The container does not remove itself from the containers list
     # but protect this just in case matplotlib starts doing this
     try:
         self.containers.remove(container_orig)
     except ValueError:
         pass
     # this gets pushed back onto the containers list
     container_new = plotfunctions.errorbar(self, workspace,
                                            **kwargs)
     self.containers.insert(orig_idx, container_new)
     self.containers.pop()
     # update line properties to match original
     orig_flat, new_flat = cbook.flatten(
         container_orig), cbook.flatten(container_new)
     for artist_orig, artist_new in zip(orig_flat, new_flat):
         artist_new.update_from(artist_orig)
     # ax.relim does not support collections...
     self._update_line_limits(container_new[0])
     self.autoscale()
     return container_new
Example #2
def get_subset(df, settings, dd_name, quiet=False):
    """
    Select only those columns specified under settings.
    Optionaly

    Parameters
    ----------

    df : DataFrame
    settings : dictionary with "dd_to_vars" column
    dd_name : str. for the lookup.
    quiet: Bool.  If True will print, but not raise, on some columns
    from settings not being in the df's columns.

    Returns
    -------

    subset : DataFrame.
    """
    cols = {x for x in flatten(settings["dd_to_vars"][dd_name].values())}
    good_cols = {
        x
        for x in flatten(settings["dd_to_vars"]["jan2013"].values())
    }
    all_cols = cols.union(good_cols)
    subset = df.columns.intersection(all_cols)

    if not quiet:
        print("Implicitly dropping {}".format(
            cols.symmetric_difference(subset)))

    return df[subset]
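
A minimal usage sketch for get_subset; the settings layout and column names below are made-up assumptions, not the project's real configuration:

import pandas as pd

# Hypothetical settings: "dd_to_vars" maps a data-dictionary name to {topic: [column names]}.
settings = {"dd_to_vars": {"jan2013": {"demo": ["age", "sex"], "work": ["hours", "income"]}}}
df = pd.DataFrame({"age": [30, 41], "sex": ["F", "M"], "hours": [40, 35], "extra": [1, 2]})

subset = get_subset(df, settings, dd_name="jan2013")
# Keeps age, sex and hours; prints "Implicitly dropping {'income'}" because that requested
# column is absent, and silently ignores "extra" because it was never requested.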
Example #3
def get_subset(df, settings, dd_name, quiet=False):
    """
    Select only those columns specified under settings.
    Optionaly

    Parameters
    ----------

    df : DataFrame
    settings : dictionary with "dd_to_vars" column
    dd_name : str. for the lookup.
    quiet: Bool.  If True will print, but not raise, on some columns
    from settings not being in the df's columns.

    Returns
    -------

    subset : DataFrame.
    """
    cols = {x for x in flatten(settings["dd_to_vars"][dd_name].values())}
    good_cols = {x for x in flatten(settings["dd_to_vars"]["jan2013"].values())}
    all_cols = cols.union(good_cols)
    subset = df.columns.intersection(all_cols)

    if not quiet:
        print("Implicitly dropping {}".format(cols.symmetric_difference(subset)))

    return df[subset]
Example #4
    def handle_returning_base_goals(self, data=None):
        # If input is received, use it. Otherwise, use data from above.
        if data != None:
            score_sheet = copy.copy(data)
        else:
            score_sheet = copy.copy(self.score_sheet)

        # I only want to output the configuration with the best score, so first I grab it from the score sheet.
        best_score_cfg = score_sheet[0, 0]
        best_score_score = score_sheet[0, 1]

        pr2_base_output = []
        configuration_output = []

        # Outputs the best location for the pr2
        # base and the best "other" configurations in two separate lists.
        # Format of output is:
        # [x (m), y (m), theta (radians)], [pr2_z_axis (cm), autobed_height (cm), autobed_headrest_angle (radians)]
        # The current output for the robot base location is the transformation from the goal position for the robot base
        # to the AR tag.
        # For a task with a solution of multiple configurations, each configuration will be appended to the previous list.
        # E.g. [x1, y1, th1, x2, y2, th2] where the first three entries correspond to the first configuration.
        for i in xrange(len(best_score_cfg[0])):
            origin_B_goal = np.matrix([[m.cos(best_score_cfg[2][i]), -m.sin(best_score_cfg[2][i]), 0., best_score_cfg[0][i]],
                                       [m.sin(best_score_cfg[2][i]),  m.cos(best_score_cfg[2][i]), 0., best_score_cfg[1][i]],
                                       [0.,                      0.,                           1.,           0.],
                                       [0.,                      0.,                           0.,           1.]])
            pr2_B_goal = self.origin_B_pr2.I * origin_B_goal
            goal_B_ar = pr2_B_goal.I * self.pr2_B_ar
            pos_goal, ori_goal = Bmat_to_pos_quat(goal_B_ar)
            pr2_base_output.append([pos_goal[0], pos_goal[1], m.acos(pr2_B_goal[0, 0])])
            configuration_output.append([best_score_cfg[3][i], 100*best_score_cfg[4][i], np.degrees(best_score_cfg[5][i])])
        print 'Base selection service is done and has completed preparing its result.'
        return list(flatten(pr2_base_output)), list(flatten(configuration_output))
Example #5
def get_gorod_sheremetevo(message):
    global gorod
    gorod = message.text
    marshrut = sorted(set(flatten(list(Sharik(0)['Маршрут']))))
    spisok = []
    sl = {}
    for i in marshrut:
        spisok.append(i.split('-')[1].strip())
    for i in range(len(marshrut)):
        sl[spisok[i]] = marshrut[i]

    marshrut_1 = sorted(set(flatten(list(Sharik(1)['Маршрут']))))
    spisok_1 = []
    sl_1 = {}
    for i in marshrut_1:
        spisok_1.append(i.split('-')[0].strip())
    for i in range(len(marshrut_1)):
        sl_1[spisok_1[i]] = marshrut_1[i]

    if action == 0 and gorod in spisok:
        data = Sharik(0)[Sharik(0)['Маршрут'] == sl[gorod]][[
            'Номер рейса'
        ]].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif action == 1 and gorod in spisok_1:
        data = Sharik(1)[Sharik(1)['Маршрут'] == sl_1[gorod]][[
            'Номер рейса'
        ]].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif (action == 0 and gorod not in spisok) or (action == 1
                                                   and gorod not in spisok_1):
        bot.send_message(message.chat.id,
                         "Таких рейсов на сегодня не запланировано.")
        keyboard = types.InlineKeyboardMarkup()
        url_button = types.InlineKeyboardButton(
            text="Посмотреть онлайн табло",
            url=
            'https://www.svo.aero/ru/timetable/departure?date=today&period=00:00-02:00&terminal=all'
        )
        keyboard.add(url_button)
        bot.send_message(message.chat.id,
                         "Предлагаю посмотреть онлайн табло.",
                         reply_markup=keyboard)
        dbworker.set_state(message.chat.id, config.States.S_AIRPORT.value)
Example #6
def enter_reis_num_sheremetevo(message):
    global reis
    reis = message.text
    bot.send_message(message.chat.id,
                     'Спасибо. Сейчас я обработаю вашу информацию')
    x = Sharik(action)['Номер рейса']
    if reis in (sorted(list(flatten(list(x))))):
        for_sending = Sharik(action)[x == reis]
        py = for_sending.to_dict('records')[0]
        for key, value in py.items():
            if value != '':
                bot.send_message(message.chat.id, '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_AIRPORT.value)
    elif reis not in (sorted(list(flatten(list(x))))):
        bot.send_message(message.chat.id,
                         'К сожалению указанного рейса не существует')
        if action == 1:
            bot.send_message(
                message.chat.id,
                'Введите название города откуда должен вылететь самолёт по вашему рейсу. Например: Ларнака'
            )
            dbworker.set_state(message.chat.id, config.States.S_GOROD.value)
        elif action == 0:
            bot.send_message(
                message.chat.id,
                'Введите название города, в который ожидается выполнение рейса. Например: Краснодар'
            )
            dbworker.set_state(message.chat.id, config.States.S_GOROD.value)
Example #7
 def _data_update(artists, workspace):
     # errorbar with workspaces can only return a single container
     container_orig = artists[0]
     # It is not possible to simply reset the error bars so
     # we have to plot new lines but ensure we don't reorder them on the plot!
     orig_idx = self.containers.index(container_orig)
     container_orig.remove()
     # The container does not remove itself from the containers list
     # but protect this just in case matplotlib starts doing this
     try:
         self.containers.remove(container_orig)
     except ValueError:
         pass
     # this gets pushed back onto the containers list
     container_new = plotfunctions.errorbar(self, workspace, **kwargs)
     self.containers.insert(orig_idx, container_new)
     self.containers.pop()
     # update line properties to match original
     orig_flat, new_flat = cbook.flatten(container_orig), cbook.flatten(container_new)
     for artist_orig, artist_new in zip(orig_flat, new_flat):
         artist_new.update_from(artist_orig)
     # ax.relim does not support collections...
     self._update_line_limits(container_new[0])
     self.autoscale()
     return container_new
Example #8
    def handle_returning_base_goals(self, data=None):
        if data != None:
            score_sheet = copy.copy(data)
        else:
            score_sheet = copy.copy(self.score_sheet)

        # now = rospy.Time.now() + rospy.Duration(1.0)
        # self.listener.waitForTransform('/odom_combined', '/base_link', now, rospy.Duration(10))
        # (trans, rot) = self.listener.lookupTransform('/odom_combined', '/base_link', now)
        # odom_B_pr2 = createBMatrix(trans, rot)

        best_score_cfg = score_sheet[0, 0]
        best_score_score = score_sheet[0, 1]

        pr2_base_output = []
        configuration_output = []

        # The output is a list of floats that are the position and quaternions for the transform from the goal location
        # to the ar tag. It also outputs a list of floats that is [robot z axis, bed height, head rest angle (degrees)].
        for i in xrange(len(best_score_cfg[0])):
            origin_B_goal = np.matrix([[
                m.cos(best_score_cfg[2][i]), -m.sin(best_score_cfg[2][i]), 0.,
                best_score_cfg[0][i]
            ],
                                       [
                                           m.sin(best_score_cfg[2][i]),
                                           m.cos(best_score_cfg[2][i]), 0.,
                                           best_score_cfg[1][i]
                                       ], [0., 0., 1., 0.], [0., 0., 0., 1.]])
            pr2_B_goal = self.origin_B_pr2.I * origin_B_goal
            goal_B_ar = pr2_B_goal.I * self.pr2_B_ar
            pos_goal, ori_goal = Bmat_to_pos_quat(goal_B_ar)
            # odom_B_goal = odom_B_pr2 * self.origin_B_pr2.I * origin_B_goal
            # pos_goal, ori_goal = Bmat_to_pos_quat(odom_B_goal)
            pr2_base_output.append(
                [pos_goal[0], pos_goal[1],
                 m.acos(pr2_B_goal[0, 0])])
            configuration_output.append([
                best_score_cfg[3][i], 100 * best_score_cfg[4][i],
                np.degrees(best_score_cfg[5][i])
            ])

            ## I no longer return posestamped messages. Now I return a list of floats.
            # psm = PoseStamped()
            # psm.header.frame_id = '/odom_combined'
            # psm.pose.position.x=pos_goal[0]
            # psm.pose.position.y=pos_goal[1]
            # psm.pose.position.z=pos_goal[2]
            # psm.pose.orientation.x=ori_goal[0]
            # psm.pose.orientation.y=ori_goal[1]
            # psm.pose.orientation.z=ori_goal[2]
            # psm.pose.orientation.w=ori_goal[3]
            # #print 'The quaternion to the goal location #',i,' is: \n',psm
            # output.append(psm)
        print 'Base selection service is done and has output a result.'
        ## Format of output is a list. Output is position [x,y,z] then quaternion [x,y,z,w] for each base location
        # (could output multiple base locations). So each set of 7 values is for one base location.
        return list(flatten(pr2_base_output)), list(
            flatten(configuration_output))
Example #9
    def _makeplot(
        self,
        ax,
        fig,
        data,
        ymin=None,
        ymax=None,
        height=6,
        width=6,
        dos=None,
        color=None,
    ):
        """Utility method to tidy phonon band structure diagrams."""
        # Define colours
        if color is None:
            color = "C0"  # Default to first colour in matplotlib series

        # set x and y limits
        tymax = ymax if (ymax is not None) else max(flatten(data["frequency"]))
        tymin = ymin if (ymin is not None) else min(flatten(data["frequency"]))
        pad = (tymax - tymin) * 0.05

        if ymin is None:
            ymin = 0 if tymin >= self.imag_tol else tymin - pad
        ymax = ymax if ymax else tymax + pad

        ax.set_ylim(ymin, ymax)
        ax.set_xlim(0, data["distances"][-1][-1])

        if ymin < 0:
            dashline = True
            ax.axhline(
                0,
                color=rcParams["grid.color"],
                linestyle="--",
                dashes=dashes,
                zorder=0,
                linewidth=rcParams["ytick.major.width"],
            )
        else:
            dashline = False

        if dos is not None:
            self._plot_phonon_dos(dos,
                                  ax=fig.axes[1],
                                  color=color,
                                  dashline=dashline)
        else:

            # keep correct aspect ratio; match axis to canvas
            x0, x1 = ax.get_xlim()
            y0, y1 = ax.get_ylim()

            if width is None:
                width = rcParams["figure.figsize"][0]
            if height is None:
                height = rcParams["figure.figsize"][1]
            ax.set_aspect((height / width) * ((x1 - x0) / (y1 - y0)))
Example #10
def setp(h, *args, **kwargs):
    """
    matplotlib supports the use of :func:`setp` ("set property") and
    :func:`getp` to set and get object properties, as well as to do
    introspection on the object.  For example, to set the linestyle of a
    line to be dashed, you can do::
      >>> line, = plot([1,2,3])
      >>> setp(line, linestyle='--')
    If you want to know the valid types of arguments, you can provide the
    name of the property you want to set without a value::
      >>> setp(line, 'linestyle')
          linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
    If you want to see all the properties that can be set, and their
    possible values, you can do::
      >>> setp(line)
          ... long output listing omitted
    :func:`setp` operates on a single instance or a list of instances.
    If you are in query mode introspecting the possible values, only
    the first instance in the sequence is used.  When actually setting
    values, all the instances will be set.  E.g., suppose you have a
    list of two lines, the following will make both lines thicker and
    red::
      >>> x = arange(0,1.0,0.01)
      >>> y1 = sin(2*pi*x)
      >>> y2 = sin(4*pi*x)
      >>> lines = plot(x, y1, x, y2)
      >>> setp(lines, linewidth=2, color='r')
    :func:`setp` works with the matlab(TM) style string/value pairs or
    with python kwargs.  For example, the following are equivalent::
      >>> setp(lines, 'linewidth', 2, 'color', 'r')  # matlab style
      >>> setp(lines, linewidth=2, color='r')       # python style
    """
    insp = ArtistInspector(h)
    if len(kwargs) == 0 and len(args) == 0:
        print "\n".join(insp.pprint_setters())
        return
    if len(kwargs) == 0 and len(args) == 1:
        print insp.pprint_setters(prop=args[0])
        return
    if not cbook.iterable(h):
        h = [h]
    else:
        h = cbook.flatten(h)
    if len(args) % 2:
        raise ValueError("The set args must be string, value pairs")
    funcvals = []
    for i in range(0, len(args) - 1, 2):
        funcvals.append((args[i], args[i + 1]))
    funcvals.extend(kwargs.items())
    ret = []
    for o in h:
        for s, val in funcvals:
            s = s.lower()
            funcName = "set_%s" % s
            func = getattr(o, funcName)
            ret.extend([func(val)])
    return [x for x in cbook.flatten(ret)]
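
Because the handles are passed through cbook.flatten, nested sequences of artists are accepted too; a small sketch using the public pyplot wrapper:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
lines_a = ax.plot([0, 1], [0, 1])
lines_b = ax.plot([0, 1], [1, 0])
# A nested list of line lists works because setp flattens its input before applying setters.
plt.setp([lines_a, lines_b], linewidth=2, color='r')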
Example #11
 def recurse(s, l, w):
     for ww, ll in zip(w, l):
         if type(ww) is list:
             for e in flatten(ll):
                 s.Add((ww[0] == 0) <= (e == 0))
             recurse(s, ll[1:], ww[1:])
         else:
             for e in flatten(ll):
                 s.Add((ww == 0) <= (e == 0))
Example #12
 def recurse(s, l, w):
     for ww, ll in zip(w, l):
         if type(ww) is list:
             for e in flatten(ll):
                 s.Add((ww[0] == 0) <= (e == 0))
             recurse(s, ll[1:], ww[1:])
         else:
             for e in flatten(ll):
                 s.Add((ww == 0) <= (e == 0))
Example #13
def get_gorod_vnukovo(message):
    global gorod
    gorod = message.text
    all_cities = sorted(
        set(flatten(list(vnukovo(pri)['Город отправления (аэропорт)']))))
    new = []
    for i in all_cities:
        new.append(i.split()[0])
    slovar = {}
    for i in range(len(new)):
        slovar[new[i]] = all_cities[i]
    all_cities_1 = sorted(
        set(flatten(list(vnukovo(vil)['Город назначения (аэропорт)']))))
    new_1 = []
    for i in all_cities_1:
        new_1.append(i.split()[0])
    slovar_1 = {}
    for i in range(len(new_1)):
        slovar_1[new_1[i]] = all_cities_1[i]
    if action == pri and gorod in new:
        data = vnukovo(pri)[vnukovo(pri)['Город отправления (аэропорт)'] ==
                            slovar[gorod]][['Рейс']].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif action == vil and gorod in new_1:
        data = vnukovo(vil)[vnukovo(vil)['Город назначения (аэропорт)'] ==
                            slovar_1[gorod]][['Рейс']].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif (action == vil and gorod not in new_1) or (action == pri
                                                    and gorod not in new):
        bot.send_message(message.chat.id,
                         "Таких рейсов на сегодня не запланировано.")
        keyboard = types.InlineKeyboardMarkup()
        url_button = types.InlineKeyboardButton(
            text="Посмотреть онлайн табло",
            url='http://www.vnukovo.ru/flights/online-timetable/#tab-sortie')
        keyboard.add(url_button)
        bot.send_message(message.chat.id,
                         "Предлагаю посмотреть онлайн табло.",
                         reply_markup=keyboard)
        dbworker.set_state(message.chat.id, config.States.S_AIRPORT.value)
Example #14
def map_indicator(df):
    mapped_df = df.apply(do_map_indicator, axis=1)
    mapped_columns = mapped_df.columns.map(lambda x: [x + '_idt', x])

    new_val = list(map(lambda x: list(flatten(x)), mapped_df.values))
    new_col = list(flatten(mapped_columns))

    output_df = pd.DataFrame(new_val, columns=new_col, index=df.index)

    return output_df
Example #15
def calc_fscore(r, p):
    """
	given recall and precision arrays, calculate the f-measure (f-score)
	"""
    a = array(zip(flatten(r), flatten(p)))
    r, p = a[:, 0], a[:, 1]
    idx = where(r)
    r, p = r[idx], p[idx]
    F = (2 * p * r / (p + r)).mean()
    return F
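
A tiny call sketch; note the function relies on Python 2 semantics (array over the result of zip), so this is only meant to illustrate the expected argument shapes:

from numpy import array

recall = [array([0.0, 0.5, 1.0])]      # e.g. one per-song recall curve
precision = [array([1.0, 0.8, 0.6])]
F = calc_fscore(recall, precision)     # mean of 2*p*r/(p+r) over the points with nonzero recall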
Example #16
def calc_fscore(r,p):
	"""
	given recall and precision arrays, calculate the f-measure (f-score)
	"""
	a = array(zip(flatten(r),flatten(p)))
	r,p = a[:,0],a[:,1]
	idx = where(r)
	r,p = r[idx],p[idx]
	F = (2*p*r/(p+r)).mean()
	return F
Example #17
 def fit_2dgauss(self, x, y):
     runs = self.item.datas
     if self.item.settings.usemask:
         xss = array(
             list(flatten(run['col_' + x][run.mask] for run in runs)))
         yss = array(
             list(flatten(run['col_' + y][run.mask] for run in runs)))
         zss = array(list(flatten(run.y[run.mask] for run in runs)))
         dzss = array(list(flatten(run.dy[run.mask] for run in runs)))
     else:
         xss = array(list(flatten(run['col_' + x] for run in runs)))
         yss = array(list(flatten(run['col_' + y] for run in runs)))
         zss = array(list(flatten(run.y for run in runs)))
         dzss = array(list(flatten(run.dy for run in runs)))
     maxidx = zss.argmax()
     xdata = array((xss, yss)).T
     model = Gauss2D(pos_x=xss[maxidx],
                     pos_y=yss[maxidx],
                     fwhm_x=0.5 * (xss.max() - xss.min()),
                     fwhm_y=0.5 * (yss.max() - yss.min()),
                     ampl=zss[maxidx])
     data = Dataset.from_arrays('2dgauss', xdata, zss, dzss)
     res = model.fit(data)
     xx, yy = mgrid[xss.min():xss.max():100j, yss.min():yss.max():100j]
     mesh = array((xx.ravel(), yy.ravel())).T
     zmesh = model.fcn(res.paramvalues, mesh).reshape((100, 100))
     ax = self.canvas.figure.gca()
     ax.contour(xx, yy, zmesh)
     self.fitParLbl.setText('pos_x: %(pos_x).5f  pos_y: %(pos_y).5f  '
                            'theta: %(theta).5f  '
                            'fwhm_x: %(fwhm_x).5f  fwhm_y: %(fwhm_y).5f  '
                            'ampl: %(ampl).5f' % res.paramvalues)
Example #18
def evaluate_ephys(chan_fronts, sync_fronts, show_plots=SHOW_PLOTS):
    """
    check number of detected square pulses and temporal jitter
    """

    # check if all signals have been detected
    L_sync_up = list(flatten(sync_fronts['fpga up fronts']))
    L_sync_down = list(flatten(sync_fronts['fpga down fronts']))

    assert len(L_sync_up) == 500, 'not all fpga up fronts detected'
    assert len(L_sync_down) == 500, 'not all fpga down fronts detected'

    for i in range(len(chan_fronts)):

        try:

            L_chan_up = list(flatten(chan_fronts[i]['ephys up fronts']))
            L_chan_down = list(flatten(chan_fronts[i]['ephys down fronts']))

            assert len(L_chan_up) == 500, \
                'not all ephys up fronts detected'
            assert len(L_chan_down) == 500, \
                'not all ephys down fronts detected'

            break

        except BaseException:

            continue

    ups_errors = np.array(L_chan_up) - np.array(L_sync_up)
    downs_errors = np.array(L_chan_down) - np.array(L_sync_down)

    MAX = max([max(ups_errors), max(downs_errors)])

    if MAX > 20:
        print('ATTENTION, the maximal error is unusually high, %s sec' %
              str(MAX / 30000.))

    print('ephys test passed')

    if show_plots:
        plt.figure('histogram')

        #  pool up front and down front temporal errors
        Er = [np.array(ups_errors), np.array(downs_errors)]
        f = np.reshape(Er, 1000) / 30000.

        plt.hist(f)
        plt.xlabel('error between fpga fronts and ephys fronts in sec')
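
A sketch of the input structure the assertions above expect; the fronts here are synthetic and perfectly aligned, and the real pipeline's container types may differ:

import numpy as np

sync_fronts = {'fpga up fronts': [np.arange(500)],
               'fpga down fronts': [np.arange(500) + 0.5]}
chan_fronts = [{'ephys up fronts': [np.arange(500)],
                'ephys down fronts': [np.arange(500) + 0.5]}]
evaluate_ephys(chan_fronts, sync_fronts, show_plots=False)  # prints 'ephys test passed'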
Example #19
def get_gorod_domodedovo(message):
    global gorod
    gorod = message.text.upper()
    if action == prilet and gorod in ', '.join(
            sorted(set(flatten(list(stat(prilet)['Аэропорт отправления']))))):
        data = stat(prilet)[stat(prilet)['Аэропорт отправления'] == gorod][[
            '№ Рейса'
        ]].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif action == vilet and gorod in ', '.join(
            sorted(set(flatten(list(stat(vilet)['Аэропорт назначения']))))):
        data = stat(vilet)[stat(vilet)['Аэропорт назначения'] == gorod][[
            '№ Рейса'
        ]].to_dict('records')
        for element in data:
            for key, value in element.items():
                if value != '':
                    bot.send_message(message.chat.id,
                                     '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_REIS_NUMBER.value)
        bot.send_message(
            message.chat.id, 'Теперь введите номер вашего рейса\n'
            "Или нажмите /reset для отмены поиска")
    elif gorod not in ', '.join(
            sorted(set(flatten(list(stat(vilet)['Аэропорт назначения']))))
    ) or gorod not in ', '.join(
            sorted(set(flatten(list(stat(prilet)['Аэропорт отправления']))))):
        bot.send_message(
            message.chat.id,
            "Таких рейсов на сегодня не запланировано.")  #Дописать условия
        keyboard = types.InlineKeyboardMarkup()
        url_button = types.InlineKeyboardButton(
            text="Посмотреть онлайн табло",
            url=
            'https://www.dme.ru/book/live-board/?searchText=&column=4&sort=1&start=0&end=4440&direction=A&page=1&count=&isSlider=1)'
        )
        keyboard.add(url_button)
        bot.send_message(message.chat.id,
                         "Предлагаю посмотреть онлайн табло.",
                         reply_markup=keyboard)
        dbworker.set_state(message.chat.id, config.States.S_AIRPORT.value)
Example #20
def evaluate_classifier(fname='saved_data.pickle', use_pca=True, null_clf=False, eps=finfo(float).eps, clip=-100):
	"""
	Gaussian classifier for non-tuned / autotuned equal-temperament magnitudes
	"""
	with open(fname,'rb') as f:
		data = pickle.load(f)
	a0 = array([[dd['nontuned_mags'] for dd in d] for d in data[1::2]])
	a1 = array([[dd['autotuned_mags'] for dd in d] for d in data[1::2]])
	P,TP,FN,FP,TN,PR,RE = [],[],[],[],[],[],[]
	T0W0,T0W1,T1W0,T1W1 = [],[],[],[]
	for song in arange(len(a0)):
		# per-song precision / recall
		idx = setdiff1d(arange(len(a0)),[song])
		train0=dB(array([a for a in flatten(a0[idx])]))
		train1=dB(array([a for a in flatten(a1[idx])]))
		test0=dB(array([a for a in flatten(a0[song])]))
		test1=dB(array([a for a in flatten(a1[song])]))
		if use_pca:
			u,s,v = svd(array([train0,train1]).T,0)
			train0 = u[:,0]
			train1 = u[:,1]
			test = array([test0,test1]).T
			test = dot(dot(test,v.T),diag(1./s))
			test0 = test[:,0]
			test1 = test[:,1]
		m0,v0 = train0.mean(),train0.var()
		m1,v1 = train1.mean(),train1.var()
		P.append(len(test0))
		t1w0,t1w1 = log(eval_gauss(test1,m0,v0)+eps), log(eval_gauss(test1,m1,v1)+eps)
		t0w0,t0w1 = log(eval_gauss(test0,m0,v0)+eps), log(eval_gauss(test0,m1,v1)+eps)
		if clip!=0:
			t1w0[t1w0<clip]=clip
			t1w1[t1w1<clip]=clip
			t0w0[t0w0<clip]=clip
			t0w1[t0w1<clip]=clip
		T0W0.append(t0w0)
		T0W1.append(t0w1)
		T1W0.append(t1w0)
		T1W1.append(t1w1)
		TP.append(sum(t1w1>t1w0))
		FN.append(sum(t1w1<=t1w0))
		FP.append(sum(t0w1>t0w0))
		TN.append(sum(t0w1<=t0w0))
		prec,rec = calc_precrec(t0w0,t0w1,t1w0,t1w1,null_clf)
		PR.append(prec)
		RE.append(rec)
	F = calc_fscore(RE,PR)
	return {'P':array(P),'TP':array(TP),'FN':array(FN),'FP':array(FP),'TN':array(TN),
		'PR':PR,'RE':RE,'F':F, 'T0W0':T0W0,'T0W1':T0W1,'T1W0':T1W0,'T1W1':T1W1}
Example #21
    def handle_returning_base_goals(self, data=None):
        if data != None:
            score_sheet = copy.copy(data)
        else:
            score_sheet = copy.copy(self.score_sheet)

        # now = rospy.Time.now() + rospy.Duration(1.0)
        # self.listener.waitForTransform('/odom_combined', '/base_link', now, rospy.Duration(10))
        # (trans, rot) = self.listener.lookupTransform('/odom_combined', '/base_link', now)
        # odom_B_pr2 = createBMatrix(trans, rot)

        best_score_cfg = score_sheet[0, 0]
        best_score_score = score_sheet[0, 1]

        pr2_base_output = []
        configuration_output = []

        # The output is a list of floats that are the position and quaternions for the transform from the goal location
        # to the ar tag. It also outputs a list of floats that is [robot z axis, bed height, head rest angle (degrees)].
        for i in xrange(len(best_score_cfg[0])):
            origin_B_goal = np.matrix([[m.cos(best_score_cfg[2][i]), -m.sin(best_score_cfg[2][i]), 0., best_score_cfg[0][i]],
                                       [m.sin(best_score_cfg[2][i]),  m.cos(best_score_cfg[2][i]), 0., best_score_cfg[1][i]],
                                       [0.,                      0.,                           1.,           0.],
                                       [0.,                      0.,                           0.,           1.]])
            pr2_B_goal = self.origin_B_pr2.I * origin_B_goal
            goal_B_ar = pr2_B_goal.I * self.pr2_B_ar
            pos_goal, ori_goal = Bmat_to_pos_quat(goal_B_ar)
            # odom_B_goal = odom_B_pr2 * self.origin_B_pr2.I * origin_B_goal
            # pos_goal, ori_goal = Bmat_to_pos_quat(odom_B_goal)
            pr2_base_output.append([pos_goal[0], pos_goal[1], m.acos(pr2_B_goal[0, 0])])
            configuration_output.append([best_score_cfg[3][i], 100*best_score_cfg[4][i], np.degrees(best_score_cfg[5][i])])

            ## I no longer return posestamped messages. Now I return a list of floats.
            # psm = PoseStamped()
            # psm.header.frame_id = '/odom_combined'
            # psm.pose.position.x=pos_goal[0]
            # psm.pose.position.y=pos_goal[1]
            # psm.pose.position.z=pos_goal[2]
            # psm.pose.orientation.x=ori_goal[0]
            # psm.pose.orientation.y=ori_goal[1]
            # psm.pose.orientation.z=ori_goal[2]
            # psm.pose.orientation.w=ori_goal[3]
            # #print 'The quaternion to the goal location #',i,' is: \n',psm
            # output.append(psm)
        print 'Base selection service is done and has output a result.'
        ## Format of output is a list. Output is position [x,y,z] then quaternion [x,y,z,w] for each base location
        # (could output multiple base locations). So each set of 7 values is for one base location.
        return list(flatten(pr2_base_output)), list(flatten(configuration_output))
Example #22
File: mcmc_chain.py, Project: aeb/ThemisPy
def parse_parameter_arglist(arg_list):
    """
    Parses a list of lists of strings containing individual numbers (e.g., 0 1 4), 
    comma separated lists (e.g., 0, 2, 5), ranges (e.g., 0-3), or combinations thereof,
    and reduces them to a unique, ordered list of integers.  Such a list of parameter
    values arises naturally from ArgumentParser append, e.g., from
    ``parser.add_argument("-p", type=str, nargs='+', action='append')``.

    Args:
      arg_list (list): List of lists with strings denoting indexes or ranges of indexes.
    
    Returns:
      (list): Sorted, unique list of integers.
    """

    plist = []
    for arg in flatten(arg_list):
        tokens = arg.split(',')
        for token in tokens:
            if ('-' in token):  # range
                toks = token.split('-')
                plist.extend(list(range(int(toks[0]), int(toks[1]) + 1)))
            elif (token.isspace() or token == ''):  # whitespace or empty
                continue
            else:  # a number
                plist.extend([int(token)])
    plist = np.unique(np.sort(np.array(plist)))

    return list(plist)
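
A quick check of the parsing rules described in the docstring; the nested list mimics what the ArgumentParser append action produces:

plist = parse_parameter_arglist([['0', '2,5'], ['7-9']])
# -> [0, 2, 5, 7, 8, 9]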
Example #23
def coverage(paths, xs, weights):
    """
    Computes coverage of map, using xs as features weighted by weights.
    """
    subset = list(set(cbook.flatten(paths)))
    total = (1 - np.prod(1 - xs[subset], axis=0)).dot(weights)
    return total
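
A tiny numeric sketch with made-up values, reading each row of xs as the per-location detection probabilities of the weighted features (my interpretation of the docstring):

import numpy as np

xs = np.array([[0.2, 0.5],
               [0.4, 0.1],
               [0.0, 0.9]])      # 3 locations x 2 features
weights = np.array([1.0, 2.0])
paths = [[0, 1], [1, 2]]         # two paths over location indices

print(coverage(paths, xs, weights))   # roughly 2.43 for these numbers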
Example #24
    def remove(self):
        for c in cbook.flatten(
                self, scalarp=lambda x: isinstance(x, martist.Artist)):
            c.remove()

        if self._remove_method:
            self._remove_method(self)
Example #25
def is_empty(value):
    """
    Consider `value` as empty if it fits any of the following rules::

        - is None
        - is Sized with zero length
        - is pandas.DataFrame with zero length
        - is pandas.NaT
        - is numpy.nan

    :param value: Any
    :return: bool
    """
    if isinstance(value, type(None)):
        return True
    if isinstance(value, Sized):
        # this cover also strings
        return len(value) == 0 or len(list(flatten(value))) == 0
    if isinstance(value, pd.DataFrame):
        return value.empty
    if isinstance(value, type(pd.NaT)):
        return True
    if up.isnan(value):
        return True
    return False
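
A few calls consistent with the rules listed in the docstring (assuming the flatten and pandas imports this module already uses):

import pandas as pd

is_empty(None)            # True
is_empty([])              # True
is_empty([[], []])        # True: flattens to nothing
is_empty(pd.DataFrame())  # True: zero length
is_empty("text")          # False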
Example #26
File: guazi.py, Project: victorweiwei/guazi
def deal_info(url):
    response = requests.get(url, headers=head, timeout=(2, 2))
    html = etree.HTML(response.text)
    title = html.xpath("//h2[@class='titlebox']/text()")[0].strip()  # listing title
    license_time = html.xpath("//*[@class='one']/span/text()")[0]  # registration date
    Kilometre_number = html.xpath("//*[@class='two']/span/text()")[0]  # mileage (km)
    city = html.xpath("//*[@class='three']/span/text()")[0]  # registration city
    displacement = html.xpath("//*[@class='three']/span/text()")[1]  # engine displacement
    gear_box = html.xpath("//*[@class='last']/span/text()")[0]  # gearbox
    price = float(
        html.xpath("//*[@class='pricestype']/text()")[0].replace('¥',
                                                                 '')) * 10000
    base_param1 = html.xpath("//*[@class='td1']/text()")
    base_param2 = html.xpath("//*[@class='td2']/text()")
    from matplotlib.cbook import flatten
    base_param = list(flatten(zip(base_param1, base_param2)))
    param = ','.join(base_param)
    try:
        dataframe = pd.DataFrame({
            '售车标题': title,
            '上牌时间': license_time,
            '公里数': Kilometre_number,
            '上牌地': city,
            '排量': displacement,
            '变速箱': gear_box,
            '价格': price,
            '汽车参数': [param]
        })
        dataframe.to_csv("car.csv", index=False, sep=',', mode='a')
    except Exception as e:
        print(e)
Example #27
    def gaussian_kernel(*shape):
        """
        Returns a 2D array with gaussian values to be used in the blob_outliers
        detection function. Can be anisotropic (oblong). Scaling is determined
        automatically.

        Parameters
        ----------
        shape : int, int
            if one integer is passed the kernel will be isotropic
            if two integers are passed the kernel will be anisotropic

        Returns
        -------
        array  (float)
            The 2D representation of the kernel
        """
        from matplotlib.cbook import flatten
        from numpy import exp, mgrid

        # make shape a list regardless of input
        shape = [int(a // 2) for a in flatten([shape])]
        # anisotropic if two shape values were given, else isotropic
        if len(shape) == 1:
            sx, sy = shape[0], shape[0]
        elif len(shape) == 2:
            sx, sy = shape

        # create the x and y grid
        x, y = mgrid[-sx:sx + 1, -sy:sy + 1]
        sigma = [sx / 8, sy / 8]  # sigma scaled by shape
        c = tuple([sx, sy])  # centre index of x and y
        g = 1 * exp(-((x - x[c])**2 / (2 * sigma[0])**2 + (y - y[c])**2 /
                      (2 * sigma[1])**2))
        return g
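
A quick shape check consistent with the docstring, treating the method as a plain function for the sake of the sketch:

g_iso = gaussian_kernel(15)        # isotropic: half-width 7, giving a 15x15 grid
g_aniso = gaussian_kernel(15, 9)   # anisotropic kernel
print(g_iso.shape, g_aniso.shape)  # (15, 15) (15, 9)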
Example #28
    def remove(self):
        for c in cbook.flatten(
                self, scalarp=lambda x: isinstance(x, martist.Artist)):
            c.remove()

        if self._remove_method:
            self._remove_method(self)
Example #29
def sample_model_bg(modelnames, bgnames, n_ex_per_model, n_ex, rand=0):
    """ Samples a list of models and backgrounds for building the
    dataset. The models will be repeated 'n_ex_per_model' times.
    
    modelnames: list of model names
    bgnames: list of background filenames
    n_ex_per_model: number of examples per model
    n_ex: total number of examples

    modellist: list of 'n_ex' models
    bglist: list of 'n_ex' background names
    """

    # Make model list
    modellist = [m for m in flatten(zip(*([modelnames] * n_ex_per_model)))]
    # Make background list
    rand = tools.init_rand(rand)
    bgrand = rand.randint(0, n_bg, n_ex)
    bglist = [bgnames[r] for r in bgrand]

    # Make category list
    # Model root directory
    model_path = os.path.join(gt.RESOURCE_PATH, "objs")
    # Get the model info that's contained in the scripts
    sys.path.append(model_path)
    model_categories = __import__("model_categories").MODEL_CATEGORIES
    # Assemble category info in dict with {modelname: category, ...}
    categories = []
    for categ, names in model_categories.iteritems():
        categories.extend([(name, categ) for name in names])
    categorydict = dict(categories)
    # The actual list
    categorylist = [categorydict[model] for model in modellist]

    return modellist, bglist, categorylist
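
The model-repetition idiom used above, in isolation; this sketch assumes flatten is matplotlib.cbook.flatten, as in the other examples on this page:

from matplotlib.cbook import flatten

modelnames = ['cat', 'dog']
n_ex_per_model = 3
modellist = [m for m in flatten(zip(*([modelnames] * n_ex_per_model)))]
# -> ['cat', 'cat', 'cat', 'dog', 'dog', 'dog']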
Example #30
File: test.py, Project: batxes/pol2
def generate_venn2(nA_list,nB_list,nBoth_list,nameA,nameB_list,layout_line,layout_col):
    data = []
    colors = []
    for i in range(len(nB_list)):
        tuple_ = (nA_list[i],nB_list[i],nBoth_list[i])
        data.append(tuple_)

    max_area = max(map(sum, data))
    colors=["green","green","green","green","red","red","red","red","red"]
    
    figure, axes = plt.subplots(layout_line, layout_col, figsize=(30,30))
    #figure, axes = plt.subplots(len(nB_list), 1, figsize=(12,12))
    #plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.01, hspace=0.01)
    line = 0
    column = 0
    for i in range(len(nB_list)):
        v = venn2(subsets=data[i], set_labels = (nameA.split("/")[-1][:20], nameB_list[i].split("/")[-1][:20]), ax=axes[line][column])
#        v = venn2(subsets=data[i], ax=axes[line][column])
        v.get_patch_by_id('100').set_color('lime')
        v.get_patch_by_id('010').set_color(colors[i])
        #plt.title(nameA+" vs "+nameB_list[i].split("/")[-1])
        column += 1
        print line,column
        if column == layout_col:
            line += 1
            column = 0
    for a, d in zip(flatten(axes), data):
        set_venn_scale(a, sum(d),max_area)
    #plt.show()
    plt.savefig("venn2.png")
Example #31
        def create_time_slots(day):
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24 * 60]
            slots = tuple(cbook.flatten(slots))
            slots = zip(slots[:-1], slots[1:])

            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time

            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()

            slots = []
            i = 0
            for l, s in non_slots:
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1

            slots.extend(src_slots)
            slots.sort()
            return slots
Example #32
        def create_time_slots(day):
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24*60]
            slots = tuple(cbook.flatten(slots))
            slots = zip(slots[:-1], slots[1:])

            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time

            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()

            slots = []
            i = 0
            for l, s in non_slots:
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1

            slots.extend(src_slots)
            slots.sort()
            return slots
Example #33
 def annotations(self):
     from decimal import Decimal  # for formatting
     tex_key = {
         'tkin': 'T_K',
         'tex': 'T_{ex}',
         'ntot': 'N',
         'fortho': 'F_o',
         'width': '\\sigma',
         'xoff_v': 'v',
         'fillingfraction': 'FF',
         'tau': '\\tau_{1-1}',
         'background_tb': 'T_{BG}'
     }
     # small hack below: don't quantize if error > value.  We want to see the values.
     label_list = []
     for pinfo in self.parinfo:
         parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
         parnum = int(pinfo['parname'][-1])
         if pinfo['fixed']:
             formatted_value = "%s" % pinfo['value']
             pm = ""
             formatted_error = ""
         else:
             formatted_value = Decimal("%g" % pinfo['value']).quantize(
                 Decimal("%0.2g" % (min(pinfo['error'], pinfo['value']))))
             pm = "$\\pm$"
             formatted_error = Decimal("%g" % pinfo['error']).quantize(
                 Decimal("%0.2g" % pinfo['error']))
         label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value,
                                          pm, formatted_error)
         label_list.append(label)
     labels = tuple(mpcb.flatten(label_list))
     return labels
Example #34
File: model.py, Project: allisony/pyspeckit
    def annotations(self, shortvarnames=None, debug=False):
        """
        Return a list of TeX-formatted labels

        The values and errors are formatted so that only the significant digits
        are displayed.  Rounding is performed using the decimal package.

        Parameters
        ----------
        shortvarnames : list
            A list of variable names (tex is allowed) to include in the
            annotations.  Defaults to self.shortvarnames

        Examples
        --------
        >>> # Annotate a Gaussian
        >>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
        """
        from decimal import Decimal  # for formatting

        svn = self.shortvarnames if shortvarnames is None else shortvarnames
        # if pars need to be replicated....
        if len(svn) < self.npeaks * self.npars:
            svn = svn * self.npeaks

        parvals = self.parinfo.values
        parerrs = self.parinfo.errors

        loop_list = [
            (
                parvals[ii + jj * self.npars + self.vheight],
                parerrs[ii + jj * self.npars + self.vheight],
                svn[ii + jj * self.npars],
                self.parinfo.fixed[ii + jj * self.npars + self.vheight],
                jj,
            )
            for jj in range(self.npeaks)
            for ii in range(self.npars)
        ]

        label_list = []
        for (value, error, varname, fixed, varnumber) in loop_list:
            log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
            if fixed or error == 0:
                label = "$%s(%i)$=%8s" % (
                    varname,
                    varnumber,
                    Decimal("%g" % value).quantize(Decimal("%0.6g" % (value))),
                )
            else:
                label = "$%s(%i)$=%8s $\\pm$ %8s" % (
                    varname,
                    varnumber,
                    Decimal("%g" % value).quantize(Decimal("%0.2g" % (min(np.abs([value, error]))))),
                    Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),
                )
            label_list.append(label)

        labels = tuple(mpcb.flatten(label_list))
        return labels
Example #35
 def legend(self, *args, **kwargs):
     if len(args)==0:
         all_handles = _get_handles(self)
         for ax in self.parasites:
             all_handles.extend(_get_handles(ax))
         handles = []
         labels = []
         for handle in all_handles:
             label = handle.get_label()
             if (label is not None and
                 label != '' and not label.startswith('_')):
                 handles.append(handle)
                 labels.append(label)
         if len(handles) == 0:
             warnings.warn("No labeled objects found. "
                           "Use label='...' kwarg on individual plots.")
             return None
     elif len(args)==1:
         labels = args[0]
         handles = [h for h, label in zip(all_handles, labels)]
     elif len(args)==2:
         if is_string_like(args[1]) or isinstance(args[1], int):
             labels, loc = args
             handles = [h for h, label in zip(all_handles, labels)]
             kwargs['loc'] = loc
         else:
             handles, labels = args
     elif len(args)==3:
         handles, labels, loc = args
         kwargs['loc'] = loc
     else:
         raise TypeError('Invalid arguments to legend')
     handles = cbook.flatten(handles)
     self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
     return self.legend_
Example #36
def subplot2():
    # subplot layout settings
    figure, axes = plt.subplots(3, 3, figsize=(18.5,10.5))
    figure.suptitle('species distribution in bgi100 and mgi200')
    # print(axes)
    path = r'F:\BGI100_MGI200\BGI100_MGI200.data\1210species_vn.txt'
    df = pd.read_table(path)
    cols=df.columns.tolist()
    data = [tuple(df[col])[:-1] for col in cols[1:]]
    max_area = max(map(sum, data))
    
    # scale each subplot according to its data
    def set_venn_scale(vd, ax, true_area, reference_area=max_area):
        sx = np.sqrt(float(reference_area)/true_area)
        sy = max(vd.radii)*1.5
        ax.set_xlim(-sx, sx)
        ax.set_ylim(-sy, sy)
        
    for a,d,col_name in zip(flatten(axes), data,cols[1:]):
        vd = venn3(subsets = d, set_labels = ('bgi100', 'mgi200(1)','mgi200(2)'), ax=a)
        a.set_title(col_name)
        # set_venn_scale(vd, a, sum(d))

    figure.tight_layout(pad=0.1)
    plt.show()
Example #37
 def generate_features(self, depth, subsample_n):
     NA_VAL=-100
     if depth<1:
         return
     for relation in random.choice(self.relations.keys(), subsample_n, True):
         new_obs_for_rel= apply_transforms(self.relations, [relation], self.entities)
         for ob in frozenset(flatten(new_obs_for_rel)):
             self.new_features.append(lambda x, rel=relation, t=ob: 1 if is_in_relation(x, self.relations[rel], rel, t) else NA_VAL if len(is_in_relation(x, self.relations[rel], rel))==0 else 0 )
             self.new_justify.append('is in relation %s with %s'%(relation, ob))
         if depth==2:
             for relation2 in random.choice(self.relations.keys(), 1):
                 newer_obs= apply_transforms(self.relations, [relation2], new_obs_for_rel)
                 for ob in frozenset(flatten(newer_obs)):
                     self.new_features.append(lambda x, trans=[relation,relation2], t=ob: 1 if t in apply_transforms(self.relations, trans, [x])[0] else NA_VAL if len(apply_transforms(self.relations, trans, [x])[0])==0 else 0)
                     self.new_justify.append('is in relations %s,%s with %s'%(relation, relation2, ob))
     pass #if depth=1, go one relation. if depth=2 go 2 relations and so on...
Example #38
def facet_plot(dframe, facets, props, ydata, layout=None, newfig=True, figsize=None,
               legend=True, individual_legends=False, hide_additional_axes=True, zorder='default', **kwargs):
    if newfig:
        nr_facets = len(dframe.groupby(facets))
        if layout is None:
            for i in range(2, nr_facets // 2):
                if nr_facets % i == 0:
                    layout = (nr_facets // i, i)
                    break
            if layout is None:
                n = int(np.ceil(nr_facets / 2))
                layout = (n, 2)
        fig, axs = plt.subplots(
            nrows=layout[0],
            ncols=layout[1],
            sharex=True, sharey=True, figsize=figsize
        )
        if hide_additional_axes:
            for ax in fig.axes[nr_facets:]:
                ax.set_axis_off()
    else:
        fig = plt.gcf()
        axs = fig.axes

    cycl = cycle(plt.rcParams['axes.prop_cycle'])
    prop_styles = {ps: next(cycl) for ps, _ in dframe.groupby(props)}

    if zorder == 'default':
        dz = 1
        zorder = 0
    elif zorder == 'reverse':
        dz = -1
        zorder = 0
    else:
        dz = 0

    if legend:
        ax0 = fig.add_subplot(111, frame_on=False, zorder=-9999)
        ax0.set_axis_off()
        plot_kwargs = kwargs.copy()
        for k in ['logx', 'logy', 'loglog']:
            plot_kwargs.pop(k, None)
        for l, p in prop_styles.items():
            ax0.plot([], label=str(l), **p, **plot_kwargs)
        ax0.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='x-small')
    for ax, (ps, df) in zip(flatten(axs), dframe.groupby(facets, squeeze=False)):
        for prop, df_prop in df.groupby(props):
            df_prop[ydata].plot(ax=ax, label=str(prop), zorder=zorder, **prop_styles[prop], **kwargs)
            zorder += dz
        #  ax.title(0.5, 0.1, '{},{}'.format(*ps), transform=ax.transAxes, fontsize='small')

        ax.set_title('; '.join([str(x) for x in ps]) if isinstance(ps, tuple) else str(ps), fontsize='x-small')
        if individual_legends:
            ax.legend(fontsize='x-small')

    plt.sca(ax)
    rect = (0, 0, 0.85, 1) if legend else (0, 0, 1, 1)
    plt.tight_layout(rect=rect, pad=0.1)
    return fig, axs
Example #39
 def annotations(self):
     label_list = [(
             "$A(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
             "$x(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
             "$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars])
                       ) for jj in range(self.npeaks)]
     labels = tuple(mpcb.flatten(label_list))
     return labels
Example #40
 def find_tagging(top_node, train_point):
     #finds tagging without the query func...
     if type(top_node.justify)==str and (top_node.justify.startswith('leafed') or top_node.justify.startswith('no')):
         return top_node.chosen_tag
     if train_point==[]:
         return find_tagging(top_node.left_son if []==top_node.left_son.objects[-1] else top_node.right_son, train_point)
     if train_point in list(flatten(top_node.left_son.objects)):
         return find_tagging(top_node.left_son, train_point)
     return find_tagging(top_node.right_son, train_point)
Example #41
File: core_nopd.py, Project: mattflor/wspec
def linear_labels(nestedlabels):
    def reclabels(labels):
        lsh = utils.list_shape(labels)
        if len(lsh) == 1 or len(lsh) == 0:
            return labels       # list of strings
        first,rest = labels[0], labels[1:]
        return [[x] + reclabels(rest) for x in first]
    nested = reclabels(nestedlabels)
    return list(flatten(nested))
Example #42
def plot_evaluation(stats, N=10.):
	figure()
	PR = array(zip(flatten(stats['RE']),flatten(stats['PR'])))
	PR[:,0] = fix(PR[:,0]*N)/float(N) # divide recall into deciles
	precrec = []
	for re in unique(PR[:,0]):
		p = PR[:,1][where(PR[:,0]==re)]
		precrec.append((re,p.mean(),p.std()/sqrt(len(p))))
		errorbar(x=re,y=p.mean(),yerr=p.std()/sqrt(len(p)),color='b')
		plot(re,p.mean(),'bx')
	precrec=array(precrec)
	plot(precrec[:,0],precrec[:,1],'b--')
	axis([-0.05,1.05,0,1.05])
	grid()
	title('ROC autotuned/non-tuned classifier',fontsize=20)
	xlabel('Recall (standardized deciles)', fontsize=16)
	ylabel('Precision', fontsize=16)
	text(.85,.95,'F1=%.2f'%stats['F'],fontsize=16)
	return precrec
Example #43
def plot_evaluation(stats, N=10.):
    figure()
    PR = array(zip(flatten(stats['RE']), flatten(stats['PR'])))
    PR[:, 0] = fix(PR[:, 0] * N) / float(N)  # divide recall into deciles
    precrec = []
    for re in unique(PR[:, 0]):
        p = PR[:, 1][where(PR[:, 0] == re)]
        precrec.append((re, p.mean(), p.std() / sqrt(len(p))))
        errorbar(x=re, y=p.mean(), yerr=p.std() / sqrt(len(p)), color='b')
        plot(re, p.mean(), 'bx')
    precrec = array(precrec)
    plot(precrec[:, 0], precrec[:, 1], 'b--')
    axis([-0.05, 1.05, 0, 1.05])
    grid()
    title('ROC autotuned/non-tuned classifier', fontsize=20)
    xlabel('Recall (standardized deciles)', fontsize=16)
    ylabel('Precision', fontsize=16)
    text(.85, .95, 'F1=%.2f' % stats['F'], fontsize=16)
    return precrec
Example #44
    def __init__(self, _haars, _norm=True):
        """
        Initialize a HaarLikeDescriptors object.

        :param _haars: list
          a list of feature descriptors. A feature descriptor is a list of points (row, column) in a normalized
          coordinate system ((0,0) -> (1,1)) describing the "positive" (black) patches from a Haar-like
          feature. All the patches not specified in this list are considered "negative" (white).
          The value corresponding to such a feature is the (weighted) sum of pixel intensities covered by
          "positive" patches from which the (weighted) sum of pixel intensities covered by "negative" patches
          is subtracted.

        See some examples at:
        - http://www.codeproject.com/Articles/27125/Ultra-Rapid-Object-Detection-in-Computer-Vision-Ap
        - http://en.wikipedia.org/wiki/Haar-like_features

        Examples of Haar-like features coding:

        - a Haar-like feature in which the left side is "positive" (*) and the right side "negative" (.):
          +-------+-------+
          |*******|.......|
          |*******|.......|
          |*******|.......|
          |*******|.......|
          |*******|.......|
          |*******|.......|
          +-------+-------+
          The corresponding coding is: [[(0.0, 0.0), (0.5, 1.0)]].

        - a Haar-like feature with diagonal "positive" (*) patches:
          +-------+-------+
          |*******|.......|
          |*******|.......|
          |*******|.......|
          +-------+-------+
          |.......|*******|
          |.......|*******|
          |.......|*******|
          +-------+-------+
          The corresponding coding is: [[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]].

          :param _norm: boolean
            Should the features be normalized? (scale-independent?) Default: True


        """
        self.haars = _haars
        self.nfeats = len(_haars)
        self.norm = _norm

        # Check that all coordinates are between 0 and 1:
        if any([_p < 0.0 or _p > 1.0 for _p in flatten(_haars)]):
            raise ValueError("Improper Haar feature specification.")

        return
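
A construction sketch using the two codings shown in the docstring; any coordinate outside [0, 1] would fail the flatten-based check and raise ValueError:

haars = [
    [[(0.0, 0.0), (0.5, 1.0)]],                            # left half positive
    [[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]],  # diagonal positives
]
descriptors = HaarLikeDescriptors(haars)   # descriptors.nfeats == 2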
Example #45
    def annotations(self, shortvarnames=None, debug=False):
        """
        Return a list of TeX-formatted labels

        The values and errors are formatted so that only the significant digits
        are displayed.  Rounding is performed using the decimal package.

        Parameters
        ----------
        shortvarnames : list
            A list of variable names (tex is allowed) to include in the
            annotations.  Defaults to self.shortvarnames

        Examples
        --------
        >>> # Annotate a Gaussian
        >>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
        """
        from decimal import Decimal  # for formatting
        svn = self.shortvarnames if shortvarnames is None else shortvarnames
        # if pars need to be replicated....
        if len(svn) < self.npeaks * self.npars:
            svn = svn * self.npeaks

        parvals = self.parinfo.values
        parerrs = self.parinfo.errors

        loop_list = [
            (parvals[ii + jj * self.npars + self.vheight],
             parerrs[ii + jj * self.npars + self.vheight],
             svn[ii + jj * self.npars],
             self.parinfo.fixed[ii + jj * self.npars + self.vheight], jj)
            for jj in range(self.npeaks) for ii in range(self.npars)
        ]

        label_list = []
        for (value, error, varname, fixed, varnumber) in loop_list:
            log.debug(", ".join(
                [str(x) for x in (value, error, varname, fixed, varnumber)]))
            if fixed or error == 0:
                label = ("$%s(%i)$=%8s" %
                         (varname, varnumber, Decimal("%g" % value).quantize(
                             Decimal("%0.6g" % (value)))))
            else:
                label = ("$%s(%i)$=%8s $\\pm$ %8s" % (
                    varname,
                    varnumber,
                    Decimal("%g" % value).quantize(
                        Decimal("%0.2g" % (min(np.abs([value, error]))))),
                    Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),
                ))
            label_list.append(label)

        labels = tuple(mpcb.flatten(label_list))
        return labels
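A small sketch of the rounding rule used above, simplified to quantizing a value to its error's two significant digits; it uses only the standard-library decimal module and made-up numbers.

from decimal import Decimal

value, error = 1.23456, 0.0213
rounded_value = Decimal("%g" % value).quantize(Decimal("%0.2g" % error))
rounded_error = Decimal("%g" % error).quantize(Decimal("%0.2g" % error))
print("%s +/- %s" % (rounded_value, rounded_error))   # 1.235 +/- 0.021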
Example #47
    def annotations(self):
        # One tuple of six TeX-formatted labels per fitted peak (mpcb is assumed to be matplotlib.cbook).
        label_list = [(
                "$T_K(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[0+jj*self.npars], self.mpperr[0+jj*self.npars]),
                "$T_{ex}(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[1+jj*self.npars], self.mpperr[1+jj*self.npars]),
                "$N$(%i)=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[2+jj*self.npars], self.mpperr[2+jj*self.npars]),
                "$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[3+jj*self.npars], self.mpperr[3+jj*self.npars]),
                "$v(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[4+jj*self.npars], self.mpperr[4+jj*self.npars]),
                "$F_o(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[5+jj*self.npars], self.mpperr[5+jj*self.npars])
            ) for jj in range(self.npeaks)]
        labels = tuple(mpcb.flatten(label_list))
        return labels
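A brief sketch of the final flattening step: cbook.flatten treats strings as atomic, so the list of per-peak label tuples collapses into one flat tuple of labels (mpcb is assumed to be matplotlib.cbook, and the labels below are made up).

from matplotlib import cbook as mpcb

label_list = [("$T_K(0)$=5.1 $\\pm$ 0.2", "$T_{ex}(0)$=3.4 $\\pm$ 0.1"),
              ("$T_K(1)$=7.0 $\\pm$ 0.3", "$T_{ex}(1)$=4.2 $\\pm$ 0.2")]
print(tuple(mpcb.flatten(label_list)))   # four labels in a single flat tuple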
Example #49
def enter_reis_num_domodedovo(message):
    # Telegram bot handler: look up a flight number ("reis") at Domodedovo airport.
    global reis
    spisok_reisov = []   # candidate flight numbers to suggest on a near miss
    reis = message.text
    bot.send_message(message.chat.id,
                     'Спасибо. Сейчас я обработаю вашу информацию')  # "Thank you. I will now process your information."
    x = stat(action)['№ Рейса']   # the "Flight No." column
    if reis in (sorted(list(flatten(list(x))))):
        for_sending = stat(action)[x == reis]
        py = for_sending.to_dict('records')[0]
        for key, value in py.items():
            if value != '':
                bot.send_message(message.chat.id, '{}: {}'.format(key, value))
        dbworker.set_state(message.chat.id, config.States.S_AIRPORT.value)
    else:
        for i in (sorted(set(flatten(list(x))))):
            if i.startswith(reis.split()[0]):
                spisok_reisov.append(i)
        if spisok_reisov != []:
            name = air(reis.split()[0])
            # "The flight number was entered incorrectly. You were probably looking
            #  for one of the flights of airline {}: {}"
            bot.send_message(
                message.chat.id, 'Номер рейса введён неверно.\n'
                'Вероятно Вы искали что-то из рейсов авиакомпании {}: {}'.
                format(name, '    '.join(spisok_reisov)))

        else:
            # "Unfortunately, the specified flight does not exist."
            bot.send_message(message.chat.id,
                             'К сожалению указанного рейса не существует')
        if action == prilet:   # arrivals
            # "Enter the name of the city the plane on your flight departs from. For example: Larnaca"
            bot.send_message(
                message.chat.id,
                'Введите название города откуда должен вылететь самолёт по вашему рейсу. Например: Ларнака'
            )
            dbworker.set_state(message.chat.id, config.States.S_GOROD.value)
        elif action == vilet:   # departures
            # "Enter the name of the city to which the flight is expected to depart. For example: Sochi"
            bot.send_message(
                message.chat.id,
                'Введите название города, в который ожидается выполнение рейса. Например: Сочи'
            )
            dbworker.set_state(message.chat.id, config.States.S_GOROD.value)
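A hedged sketch of the lookup pattern in the handler above: the flight-number column is flattened into a flat, de-duplicated collection before the membership test and the prefix-based suggestions. pandas and matplotlib.cbook.flatten are assumed, and the flight numbers are invented.

import pandas as pd
from matplotlib.cbook import flatten

x = pd.Series(["SU 123", "EK 131", "SU 2502"])            # stand-in for stat(action)['№ Рейса']
all_flights = sorted(set(flatten(list(x))))
print("SU 123" in all_flights)                             # exact match found
print([f for f in all_flights if f.startswith("SU")])      # suggestions, as in the else-branch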
Example #50
def load_data_parallel(directory, do_fit2D=False, do_filtering=False):
    import os, re
    # Sub-directories whose names look like a signed time in ms, e.g. "12.5ms".
    dirs = [os.path.join(directory, dr) for dr in os.listdir(directory) if re.match(r'[-+]?[0-9.]+ms', dr)]
    # lview is assumed to be an ipyparallel load-balanced view and
    # imp.single_directory_load the per-directory loader defined elsewhere.
    res = lview.map(imp.single_directory_load, dirs, [do_fit2D]*len(dirs), [do_filtering]*len(dirs))
    res.wait_interactive()
    all_data = list(flatten(res.result))
    # print outputs in each kernel
    print(''.join([x['stdout'] for x in res.metadata]))
    print('Total number of images: ', len(all_data))
    return all_data
Example #51
    def warp_slow_to_fast(self, data):
        # Insert a midpoint between each pair of consecutive samples, keep every
        # third of the resulting samples, and scale the time column (column 0)
        # down by 1.5.
        interpolated_data = []
        for i in range(len(data) - 1):
            interpolated_data.append(data[i])
            interpolated_data.append(data[i] + 0.5*(data[i+1] - data[i]))
        interpolated_data.append(data[len(data) - 1])
        interpolated_data = np.array(interpolated_data)
        warped_data = []
        for i in range(len(interpolated_data)):
            if i % 3 == 0:
                warped_data.append(list(flatten([interpolated_data[i, 0]/1.5, interpolated_data[i, 1:]])))
        return np.array(warped_data)
Example #52
    def split(self, X, y=None, groups=None):
        X = np.array(X)
        partition = 2 * self.n_splits
        # ensure the number of partitions does not exceed the number of rows
        if X.shape[0] < partition:
            partition = X.shape[0]
        partial_X_row = np.array_split(range(X.shape[0]), partition, axis=0)
        for ii in range(self.n_splits, partition):
            if self.fix_window:
                # sliding window: train only on the previous n_splits blocks
                train_index = list(
                    flatten(
                        map(lambda x: partial_X_row[x],
                            range(ii - self.n_splits, ii))))
            else:
                # expanding window: train on every block seen so far
                train_index = list(
                    flatten(map(lambda x: partial_X_row[x], range(ii))))
            test_index = partial_X_row[ii]

            yield np.array(train_index,
                           dtype=np.int64), np.array(test_index,
                                                     dtype=np.int64)
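A small sketch of the expanding-window index construction above, assuming flatten is matplotlib.cbook.flatten: contiguous row blocks from np.array_split are concatenated into a single flat list of training indices.

import numpy as np
from matplotlib.cbook import flatten

partial_X_row = np.array_split(range(10), 5)                           # five contiguous index blocks
train_index = list(flatten(map(lambda k: partial_X_row[k], range(3))))
print(train_index)                                                     # indices 0..5 from the first three blocks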
Example #53
File: spikes.py  Project: michaelerule/cgid
def get_isi_stats(spikes,epoch,FS=1000,BURST_CUTOFF_MS=10,MIN_NISI=100):
    '''
    Compute a statistical summary of an inter-spike-interval (ISI) distribution.
    Accepts a list of lists of spike times (one list per spike train).
    Returns burstiness, ISI_cv, mean_rate, KS, mode, burst_free_ISI_cv,
    burst_free_mean_rate, burst_free_mode, or None if there are fewer than
    MIN_NISI intervals.
    '''
    event,start,stop = epoch
    duration = (stop-start)/float(FS)
    ISI_events = list(flatten(map(diff,spikes)))
    allisi = np.array(ISI_events)
    if len(ISI_events)<MIN_NISI: return None
    mode = FS/modefind(allisi)
    mean_rate = (sum(map(len,spikes))) / float(len(spikes)) / duration
    ISI_cv = np.std(allisi)/np.mean(allisi)
    KS = poisson_KS(allisi)
    burstiness = sum(allisi<BURST_CUTOFF_MS)/float(len(allisi))*100
    burst_free_spikes     = remove_bursts(spikes, duration=BURST_CUTOFF_MS)
    burst_free_ISI_events = list(flatten(map(diff,burst_free_spikes)))
    burst_free_allisi     = np.array(burst_free_ISI_events)
    burst_free_mode   = FS/modefind(burst_free_allisi)
    burst_free_mean_rate = (sum(map(len,burst_free_spikes))) / float(len(burst_free_spikes)) / duration
    burst_free_ISI_cv    = np.std(burst_free_allisi)/np.mean(burst_free_allisi)
    return burstiness, ISI_cv, mean_rate, KS, mode, burst_free_ISI_cv, burst_free_mean_rate, burst_free_mode
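A minimal sketch of the ISI pooling step above, with made-up spike times: per-train intervals from np.diff are chained together with flatten (assumed to be matplotlib.cbook.flatten) before the coefficient of variation is computed.

import numpy as np
from matplotlib.cbook import flatten

spikes = [np.array([5, 18, 40, 41]), np.array([7, 9, 30])]    # spike times in samples
allisi = np.array(list(flatten(map(np.diff, spikes))))        # pooled inter-spike intervals
print(allisi, np.std(allisi) / np.mean(allisi))               # ISIs and their CV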
Example #54
def findChildrenWithAtlasTerms(name, fullhierarchy):
    atlasterms = []
    if 'children' in fullhierarchy[name]:
        for child in fullhierarchy[name]['children']:
            if 'atlasterm' in fullhierarchy[child]:
                atlasterms.append(fullhierarchy[child]['atlasterm'])
            else:
                # no term at this node: drill down recursively
                at = findChildrenWithAtlasTerms(child, fullhierarchy)
                atlasterms.append(at)

    return list(flatten(atlasterms))
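A hedged usage sketch on a toy hierarchy, reusing the function above (flatten assumed to be matplotlib.cbook.flatten): nested lists returned by the recursive calls collapse into one flat list of atlas terms.

from matplotlib.cbook import flatten

fullhierarchy = {
    'root': {'children': ['a', 'b']},
    'a':    {'atlasterm': 'Term-A'},
    'b':    {'children': ['c']},
    'c':    {'atlasterm': 'Term-C'},
}
print(findChildrenWithAtlasTerms('root', fullhierarchy))   # ['Term-A', 'Term-C']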
Example #55
def greedy(paths, path, candidates, xs, weights):
    """
    Greedily choose the candidate with max coverage relative to map+path,
    and add to path. Uses xs as features with the given weights.
    """
    maxCoverage = -float('inf') #note that if weights are negative, we can have negative coverage!
    if len(candidates) == 0:
        return
    totalMap = list(set(cbook.flatten(paths))) + path
    for p in candidates:
        c = coverage(totalMap + [p], xs, weights)
        if c > maxCoverage:
            maxCoverage = c
            maxP = p
    path.append(maxP)
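A hedged toy run of the greedy step above. The coverage() below is only a stand-in that sums weights over covered items (the real coverage function is not shown in this snippet), all data are made up, and cbook is matplotlib.cbook as the snippet assumes.

from matplotlib import cbook

def coverage(points, xs, weights):
    covered = set(cbook.flatten(points)) & set(xs)
    return sum(weights[item] for item in covered)

paths, path = [[0, 1]], [2]
candidates = [[3], [4]]
xs = [0, 1, 2, 3, 4]
weights = {0: 1, 1: 1, 2: 1, 3: 2, 4: 1}
greedy(paths, path, candidates, xs, weights)
print(path)   # [2, [3]] -- candidate [3] adds the most weighted coverage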
Example #56
def create_test_data():
    # Header and writeto are assumed to come from astropy.io.fits (or pyfits).
    filename = "production/test/test_data.fits"

    datacube = np.arange(120).reshape(4,5,6)

    key_bases = ['naxis', 'ctype', 'crval', 'cdelt', 'crpix']

    keys = list(flatten([[x+'1', x+'2', x+'3'] for x in key_bases]))

    header = Header.fromkeys(keys)
    header['ctype1'] = 'VELO-LSR'
    header['ctype2'] = 'GLON-CAR'
    header['ctype3'] = 'GLAT-CAR'

    writeto(open(filename, 'wb'), datacube, header=header)
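A short sketch of the header-key expansion above (flatten assumed to be matplotlib.cbook.flatten): each base keyword is expanded for the three cube axes and the nested lists are merged into one flat key list.

from matplotlib.cbook import flatten

key_bases = ['naxis', 'ctype']
keys = list(flatten([[x + '1', x + '2', x + '3'] for x in key_bases]))
print(keys)   # ['naxis1', 'naxis2', 'naxis3', 'ctype1', 'ctype2', 'ctype3']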
Example #57
File: spikes.py  Project: michaelerule/cgid
def get_spikes_session_filtered_by_epoch(session,area,unit,epoch):
    '''
    Spike times for one unit across a whole session.
    Spike times outside of trials, from bad trials, and outside of the given
    epoch on good trials are removed.
    '''
    allspikes = []
    event,est,esp = epoch
    for tr in cgid.data_loader.get_good_trials(session):
        t     = cgid.data_loader.get_trial_event(session,area,tr,4)
        te    = cgid.data_loader.get_trial_event(session,area,tr,event)
        start = t+te+est
        stop  = t+te+esp
        sp = get_spikes_session_time(session,area,unit,start,stop)
        allspikes.append(sp)
    return np.array(list(flatten(allspikes)), dtype=np.int32)
Example #58
    def get_new_table(self, test, test_ents, depth):
        # Build 0/1 bag-of-words tables for the training objects and the test set,
        # then append the engineered features collected up to `depth`.
        all_words = set()
        for words in self.objects:
            all_words.update(words)
        new_features = list(flatten([self.new_features[i] for i in self.new_features.keys() if i <= depth]))
        self.table = zeros((len(self.objects), len(all_words) + len(new_features)))
        self.test = zeros((len(test), len(all_words) + len(new_features)))
        self.feature_names = []
        for i, word in enumerate(all_words):
            self.table[:, i] = array([1 if (word in obj) else 0 for obj in self.objects])
            self.test[:, i] = array([1 if (word in obj) else 0 for obj in test])
            self.feature_names.append('has word:%s' % (word))
        for j, new_feature in enumerate(new_features):
            self.table[:, len(all_words) + j] = array([new_feature(ent) for ent in self.entities])
            self.test[:, len(all_words) + j] = array([new_feature(ent) for ent in test_ents])
        return self.table, self.tagging, self.test, len(new_features)
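A small sketch of the depth-filtered feature merge above, with hypothetical feature names standing in for the callables used in the method (flatten assumed to be matplotlib.cbook.flatten): per-depth feature lists are kept only up to the requested depth, then flattened into a single list.

from matplotlib.cbook import flatten

new_features = {1: ['len>3', 'is_upper'], 2: ['has_digit'], 3: ['rare']}
depth = 2
print(list(flatten([new_features[i] for i in new_features.keys() if i <= depth])))
# ['len>3', 'is_upper', 'has_digit']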
Example #59
    def OPA_dictionary(self):
        OPAc = []
        for o in self.hws:
            if o.type == 'Curve':
                OPAc.append(o)
            elif o.type == 'OPA':
                OPAc.append(o.curve)
            else:
                OPAc.append(None)

        color_dics = []
        for i in range(len(self.hws)):
            if OPAc[i]:
                color_dics.append(dict())
                colors = set(flatten(self.grid[:,:,hardwares.index(self.hws[i])]))
                for color in colors:
                    color_dics[-1][color] = m_pos(OPAc[i],color)
            else:
                color_dics.append(None)
        self.cd = color_dics