Example #1
def get_kids_qso_map(qsos, nside):
    # Count quasars per HEALPix pixel at the requested resolution
    qso_map = get_map(qsos['RAJ2000'].values, qsos['DECJ2000'].values, nside=nside)

    # Build a binary footprint mask at nside=256 and regrade it to the target nside
    mask = get_map(qsos['RAJ2000'].values, qsos['DECJ2000'].values, nside=256)
    mask = hp.ud_grade(mask, nside)
    mask[mask.nonzero()] = 1

    qso_map = get_masked_map(qso_map, mask)
    return qso_map, mask
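A hypothetical usage sketch for this example; the catalogue values and nside are invented, and healpy/pandas are assumed to be available alongside the module's own get_map and get_masked_map:

import healpy as hp
import pandas as pd

# Toy catalogue with two quasars; a real KiDS table would hold many more rows
qsos = pd.DataFrame({'RAJ2000': [150.1, 150.4], 'DECJ2000': [2.2, 2.5]})
counts_map, mask = get_kids_qso_map(qsos, nside=512)
hp.mollview(counts_map)  # quick visual sanity check of the masked counts map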
Example #2
    def eval_epoch_linear_probe(self, eval_list, epoch, vocab_dict, print_sample=False):
        label_binarizer = sklearn.preprocessing.LabelBinarizer()
        label_binarizer.fit(range(self.target_vocab_size))
        vocab_dict_rev = {v: k for k, v in vocab_dict.items()}
        self.linear.eval()
        total_loss = []

        map_list = []
        ppl_list = []
        query_map_list = []
        query_ppl_list = []
        target_map_list = []
        target_ppl_list = []
        with torch.no_grad():
            for i, instance in enumerate(eval_list):
                labels_onehot, masks_onehot, labels, _ = self.get_training_labels(instance[self.query_indices],
                                                                                  instance[self.fact_indices],
                                                                                  instance[self.negative_indices])

                output_ = self.linear(instance[self.input_type].to(self.device))  # logits over the vocabulary (size 6600)
                output = torch.sigmoid(output_)  # nn.functional.sigmoid is deprecated

                loss = self.get_loss(output, torch.tensor(labels_onehot, dtype=torch.float32).to(self.device),
                                     torch.tensor(masks_onehot, dtype=torch.float32).to(self.device))

                total_loss.append(loss.detach().cpu().numpy())

                map_list.append(get_map(output.detach().cpu().numpy(), labels))
                ppl_list.append(get_ppl(output.detach().cpu().numpy(), labels))

                query_labels_eval = np.array(instance[self.query_indices])
                target_labels_eval = np.array(list(set(instance[self.fact_indices]) - set(instance[self.query_indices])))

                if len(query_labels_eval)>0:
                    query_map_list.append(get_map(output.detach().cpu().numpy(), query_labels_eval))
                    query_ppl_list.append(get_ppl(output.detach().cpu().numpy(), query_labels_eval))
                if len(target_labels_eval)>0:
                    target_map_list.append(get_map(output.detach().cpu().numpy(), target_labels_eval))
                    target_ppl_list.append(get_ppl(output.detach().cpu().numpy(), target_labels_eval))

        result_dict = {"eval_loss":total_loss,
                       "avg map":map_list,
                       "avg ppl":ppl_list,
                       "query map:": query_map_list,
                       "query ppl:": query_ppl_list,
                       "target map:": target_map_list,
                       "target ppl:": target_ppl_list}
        print("-" * 20)
        result_summary = {x:sum(result_dict[x])/len(result_dict[x]) for x in result_dict.keys()}

        print(result_summary)

        return result_summary, result_dict
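get_map and get_ppl are not shown in this listing. A minimal sketch of a mean-average-precision helper that would fit the call sites above (a 1-D score array plus the indices of the relevant items); the name and signature are inferred, and the real implementation may differ:

import numpy as np

def get_map(scores, labels):
    # Rank all items by descending score, then average the precision
    # measured at each rank where a relevant index from `labels` appears.
    order = np.argsort(-scores)
    ranks = np.nonzero(np.isin(order, labels))[0] + 1  # 1-based ranks of the hits
    return float(np.mean(np.arange(1, len(ranks) + 1) / ranks))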
Example #3
    def train_epoch_linear_probe(self, train_list, epoch, save_folder_path):

        self.linear.train()
        total_loss = 0
        random.shuffle(train_list)
        map_list = []
        ppl_list = []
        for i, instance in enumerate(train_list):
            self.optimizer.zero_grad()
            labels_onehot, masks_onehot, labels, label_masks = self.get_training_labels(instance[self.query_indices], instance[self.fact_indices], instance[self.negative_indices])

            output_ = self.linear(instance[self.input_type].to(self.device))  # logits over the vocabulary (size 6600)
            output = torch.sigmoid(output_)  # nn.functional.sigmoid is deprecated

            loss = self.get_loss(output, torch.tensor(labels_onehot, dtype=torch.float32).to(self.device),
                                 torch.tensor(masks_onehot, dtype=torch.float32).to(self.device))
            loss.backward()
            self.optimizer.step()

            total_loss += loss.detach().cpu().numpy()

            map_list.append(get_map(output.detach().cpu().numpy(), labels))
            ppl_list.append(get_ppl(output.detach().cpu().numpy(), labels))

            # if (i + 1) % 10 == 0:
            #     print("\tsample ",i+1, " loss:", total_loss/(i+1))

        print("epoch ", epoch,"\tbert total training loss:", total_loss/len(train_list))

        return total_loss/len(train_list)
Example #4
def main():
    config_file_name = "/etc/openbaton/ems/conf.ini"
    log.debug(config_file_name)
    config = ConfigParser.ConfigParser()
    config.read(config_file_name)  #read config file
    _map = get_map(section='ems', config=config)  #get the data from map
    queue_type = _map.get("type")  #get type of the queue
    hostname = _map.get("hostname")
    username = _map.get("username")
    password = _map.get("password")
    autodel = _map.get("autodelete")
    heartbeat = _map.get("heartbeat")
    exchange_name = _map.get("exchange")
    queuedel = True
    if autodel == 'false':
        queuedel = False
    if not heartbeat:
        heartbeat = '60'
    if not exchange_name:
        exchange_name = 'openbaton-exchange'

    rabbit_credentials = pika.PlainCredentials(username, password)
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=_map.get("orch_ip"),
                                  credentials=rabbit_credentials,
                                  heartbeat_interval=int(heartbeat)))

    channel = connection.channel()

    channel.exchange_declare(exchange=exchange_name,
                             type="topic",
                             durable=True)
    channel.queue_declare(queue='ems.%s.register' % queue_type,
                          auto_delete=queuedel)
    channel.queue_declare(queue='vnfm.%s.actions' % hostname,
                          auto_delete=queuedel)
    channel.queue_bind(exchange=exchange_name,
                       queue='ems.%s.register' % queue_type)
    channel.queue_bind(exchange=exchange_name,
                       queue='vnfm.%s.actions' % hostname)
    channel.basic_publish(
        exchange='',
        routing_key='ems.%s.register' % queue_type,
        properties=pika.BasicProperties(content_type='text/plain'),
        body='{"hostname":"%s"}' % hostname)

    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(thread_function, queue='vnfm.%s.actions' % hostname)
    print "Waiting for actions"
    channel.start_consuming()
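Here (and in Example #9) get_map reads one INI section into a dict of settings. A plausible sketch of such a helper, assuming a ConfigParser instance; the actual utils implementation is not part of this listing:

def get_map(section, config):
    # Return the section's key/value pairs as a plain dict,
    # or an empty dict when the section is missing.
    if config.has_section(section):
        return dict(config.items(section))
    return {}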
Example #5
def build_map_by_image(contents, filename):
    content_type, content_string = contents.split(',')

    decoded = base64.b64decode(content_string)
    try:
        image = Image.open(BytesIO(decoded))
    except Exception as e:
        print(e)
        return html.Div([
            'An error occurred while processing the file.'
        ])

    my_map = get_map(image)
    return my_map
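A hypothetical call, assuming the Dash upload contents format "data:image/png;base64,<payload>" implied by contents.split(',') above; the file name is invented:

import base64

with open('example.png', 'rb') as f:  # hypothetical local image
    payload = base64.b64encode(f.read()).decode()
my_map = build_map_by_image('data:image/png;base64,' + payload, 'example.png')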
Example #6
    def add_fields(self):
        for key in self.player_data:
            # If the entry holds per-affiliation map graphs
            if isinstance(self.player_data[key], dict):
                default_map_data = get_map(key)
                default_node_attributes = list(default_map_data.nodes(data=True))[0][-1]
                default_edge_attributes = list(default_map_data.edges(data=True))[0][-1]
                for affiliation in self.player_data[key]:
                    player_node_attributes = list(self.player_data[key][affiliation].nodes(data=True))[0][-1]
                    player_edge_attributes = list(self.player_data[key][affiliation].edges(data=True))[0][-1]
                    # Nodes: backfill attributes missing from the player's map
                    for attribute in default_node_attributes:
                        if attribute not in player_node_attributes:
                            nx.set_node_attributes(self.player_data[key][affiliation], nx.get_node_attributes(default_map_data, attribute), attribute)
                    # Edges: same backfill for edge attributes
                    for attribute in default_edge_attributes:
                        if attribute not in player_edge_attributes:
                            nx.set_edge_attributes(self.player_data[key][affiliation], nx.get_edge_attributes(default_map_data, attribute), attribute)
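A tiny self-contained demo of the backfill pattern used above, with made-up graphs and attribute names:

import networkx as nx

default_map = nx.Graph()
default_map.add_node('a', terrain='plains')
player_map = nx.Graph()
player_map.add_node('a')  # lacks the 'terrain' attribute

nx.set_node_attributes(player_map, nx.get_node_attributes(default_map, 'terrain'), 'terrain')
print(player_map.nodes(data=True))  # NodeDataView([('a', {'terrain': 'plains'})])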
Example #7
def get_lotss_map(lotss_data, data_release, mask_filename=None, nside=2048, cut_pixels=True, masked=True):
    counts_map = get_map(lotss_data['RA'].values, lotss_data['DEC'].values, nside=nside)
    if masked:
        if data_release == 1:
            mask = get_lotss_dr1_mask(nside)
        elif data_release == 2:
            mask = get_lotss_dr2_mask(nside, filename=mask_filename, cut_pixels=cut_pixels)
        else:
            raise ValueError('Unknown LoTSS data release number')
    else:
        mask = None

    # Get noise in larger bins
    noise_map = get_aggregated_map(lotss_data['RA'].values, lotss_data['DEC'].values,
                                   lotss_data['Isl_rms'].values, nside=256, aggregation='mean')

    if masked:
        noise_map = get_masked_map(noise_map, hp.ud_grade(mask, nside_out=256))
        counts_map = get_masked_map(counts_map, mask)

    return counts_map, mask, noise_map
Example #8
                y_1 = 180
                h_list = [1.0]
                polygon_list_list = utils.get_polygon_list_list(
                    h_list, y_0, y_1, x_0, x_1)

                combined_normal_df_positive_snps = utils.get_df_positive_snps(
                    child_snps, combined_original_df, new_json_tree_rows)
                combined_normal_df_without_other = utils.get_df_without_other(
                    combined_normal_df_positive_snps)
                if len(combined_normal_df_without_other.index) > 0:
                    final_df = combined_normal_df_without_other.copy()
                    if is_extended:
                        final_df = utils.get_df_extended(
                            combined_normal_df_without_other, str_number,
                            new_json_tree_rows, child_snps,
                            combined_original_df)
                        if final_df is None:
                            continue
                    utils.get_map(final_df, polygon_list_list, child_snps,
                                  target_snp, h_list, db, collection_name)
                    snps_list.append(target_snp)
                    utils.update_db_list(collection_name, db, snps_list)
                else:
                    print(
                        "The combined_normal_df_without_other dataset has 0 rows!"
                    )
            else:
                print("The selected SNP has no child SNPs!")
            print("Finished processing SNP {}".format(target_snp))
            print(datetime.datetime.now())
Example #9
def main():
    sleep_time = 1
    logging_dir='/var/log/openbaton/'
    #logging_dir = 'log/openbaton/'
    if not os.path.exists(logging_dir):
        os.makedirs(logging_dir)
    logging.basicConfig(filename=logging_dir + '/ems-receiver.log', level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M')
    config_file_name = "/etc/openbaton/openbaton-ems.properties"
    log.debug(config_file_name)
    config = ConfigParser.ConfigParser()
    config.read(config_file_name)  # read config file
    _map = get_map(section='ems', config=config)  # get the data from map
    queue_type = _map.get("type")  # get type of the queue
    hostname = _map.get("hostname")
    username = _map.get("username")
    password = _map.get("password")
    autodel = _map.get("autodelete")
    heartbeat = _map.get("heartbeat")
    broker_port = _map.get("broker_port")
    exchange_name = _map.get("exchange")
    virtual_host = _map.get("virtual_host")
    queuedel = True
    if autodel == 'false':
        queuedel = False
    if not heartbeat:
        heartbeat = '60'
    if not exchange_name:
        exchange_name = 'openbaton-exchange'
    if not broker_port:
        broker_port = "5672"
    if not virtual_host: 
        virtual_host = "/"
    if not queue_type:
        queue_type = "generic"
    log.info(
        "EMS configuration parameters are "
        "hostname: %s, username: %s, password: *****, autodel: %s, heartbeat: %s, exchange name: %s" % (
            hostname, username, autodel, heartbeat, exchange_name))
    rabbit_credentials = pika.PlainCredentials(username, password)
    while True:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=_map.get("broker_ip"), port=int(broker_port),
                                          virtual_host=virtual_host, credentials=rabbit_credentials, heartbeat_interval=int(heartbeat)))
            channel = connection.channel()
            #channel.exchange_declare(exchange=exchange_name, type="topic", durable=True)
            #channel.queue_declare(queue='ems.%s.register'%queue_type, auto_delete=queuedel)
            channel.queue_bind(exchange=exchange_name, queue='ems.%s.register' % queue_type)
            channel.queue_declare(queue='vnfm.%s.actions' % hostname, auto_delete=queuedel)
            channel.queue_bind(exchange=exchange_name, queue='vnfm.%s.actions' % hostname)
            channel.basic_publish(exchange='', routing_key='ems.%s.register' % queue_type,
                                  properties=pika.BasicProperties(content_type='text/plain'),
                                  body='{"hostname":"%s"}' % hostname)
            channel.basic_qos(prefetch_count=1)
            channel.basic_consume(thread_function, queue='vnfm.%s.actions' % hostname)
            channel.start_consuming()
        except Exception:
            # logging.exception('')
            time.sleep(sleep_time)
            if (sleep_time < 10):
                sleep_time = sleep_time + 1
            else:
                sleep_time = sleep_time + 10
Example #10
def number_counts_pixels(data,
                         nside=58,
                         x_lim=None,
                         title=None,
                         legend_loc='upper left',
                         columns=BAND_COLUMNS):
    # Get mask for the whole dataset
    counts_map, _, _ = get_map(data['RAJ2000'], data['DECJ2000'], nside=nside)
    mask_non_zero = np.nonzero(counts_map)

    # Get x limit from all magnitudes
    if x_lim is None:
        m_min = int(math.floor(data[columns].values.min()))
        m_max = int(math.ceil(data[columns].values.max()))
        bins = np.arange(m_min, m_max + 1.0, 1.0)
    else:
        bins = np.arange(x_lim[0], x_lim[1] + 1.0, 1.0)

    bin_titles = [
        '({}, {}]'.format(bins[i], bins[i + 1])
        for i, _ in enumerate(bins[:-1])
    ]

    # Plot for every magnitude
    pixel_densities = pd.DataFrame()
    for band in columns:

        # Bin magnitudes
        data.loc[:, 'bin'] = pd.cut(data[band], bins, labels=False)

        # For each bin
        for i in range(len(bins) - 1):
            data_bin = data.loc[data['bin'] == i]

            # Get map for this magnitude bin
            bin_map, _, _ = get_map(data_bin['RAJ2000'],
                                    data_bin['DECJ2000'],
                                    nside=nside)
            map_masked = bin_map[mask_non_zero]
            # DataFrame.append was removed in pandas 2.0; use pd.concat instead
            pixel_densities = pd.concat([
                pixel_densities,
                pd.DataFrame({
                    'pixel density': map_masked,
                    'magnitude range': bin_titles[i],
                    'magnitude': band,
                })
            ], ignore_index=True)

    sns.catplot(x='magnitude range',
                y='pixel density',
                hue='magnitude',
                data=pixel_densities,
                kind='bar',
                aspect=1.7,
                height=5,
                legend_out=False,
                palette='cubehelix')
    plt.legend(loc=legend_loc, framealpha=1.0)
    plt.yscale('log')
    plt.title(title)
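A hypothetical call, assuming a DataFrame `data` with the RAJ2000/DECJ2000 columns and the magnitude columns named in BAND_COLUMNS:

import matplotlib.pyplot as plt

number_counts_pixels(data, x_lim=(16, 24), title='Pixel densities per magnitude bin')
plt.show()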
Example #11
#! /usr/bin/python

import sys

from objects import BigDir
from utils import get_map

if len(sys.argv) != 2:
    print("Usage: add_file <device>")
    exit(1)

fd = open(sys.argv[1], "r+b")

fs_map = get_map(fd)

root_frag_id = fs_map.disc_record.root >> 8
root_sec_offset = fs_map.disc_record.root & 0xff

root_locations = fs_map.find_fragment(root_frag_id,
                                      fs_map.disc_record.root_size)

fd.seek((root_locations[0])[0])

root = BigDir(fd.read(fs_map.disc_record.root_size))
root.delete('Loader')
root.add('Loader', 0xffffc856, 0xeadfc18c, 50331648, 3, 0x300)
root.sequence += 1
root.show()
root.data()

fd.seek((root_locations[0])[0])
fd.write(root.data())
Example #12
def number_counts(data_dict,
                  linear_data,
                  nside=128,
                  step=.1,
                  band_column='MAG_GAAP_r',
                  legend_loc='upper left',
                  legend_size=None):
    fig, ax = plt.subplots()
    to_plot_df = pd.DataFrame()
    x_col = pretty_print_magnitude(band_column)
    y_col = r'surface density (≤ m) [N / deg$^2$]'
    for i, (data_name, (data, counts_map)) in enumerate(data_dict.items()):
        (ra_col, dec_col) = ('RAJ2000', 'DECJ2000') if 'RAJ2000' in data else ('RA', 'DEC')

        mask_non_zero = np.nonzero(counts_map)
        print('{} area: {:.2f} deg^2'.format(
            data_name,
            len(mask_non_zero[0]) * hp.nside2pixarea(nside, degrees=True)))

        m_min = int(math.ceil(data[band_column].min()))
        m_max = int(math.ceil(data[band_column].max()))
        magnitude_arr = np.arange(m_min, m_max + step, step)
        density_mean_arr, density_error_arr = [], []
        for m_max in magnitude_arr:
            data_m_max = data.loc[data[band_column] < m_max]
            map_m_max, _, _ = get_map(data_m_max[ra_col],
                                      data_m_max[dec_col],
                                      nside=nside)
            densities = map_m_max[mask_non_zero] / hp.nside2pixarea(
                nside, degrees=True)
            (mu, sigma) = stats.norm.fit(densities)
            density_mean_arr.append(mu)
            density_error_arr.append(sigma / math.sqrt(densities.shape[0]))

        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        to_plot_df = pd.concat([
            to_plot_df,
            pd.DataFrame({
                x_col: magnitude_arr,
                y_col: density_mean_arr,
                'error': density_error_arr,
                'data name': [data_name] * len(magnitude_arr),
            })
        ], ignore_index=True)

    color_palette = get_cubehelix_palette(
        len(data_dict), reverse=True) if len(data_dict) > 1 else [(0, 0, 0)]
    sns.lineplot(x=x_col,
                 y=y_col,
                 data=to_plot_df,
                 hue='data name',
                 palette=color_palette,
                 style='data name',
                 markers=True)
    plot_linear_data(linear_data)

    ax = plt.gca()
    for i, data_name in enumerate(to_plot_df['data name'].unique()):
        to_plot_single_data = to_plot_df.loc[to_plot_df['data name'] ==
                                             data_name]
        lower = to_plot_single_data[
            y_col].values - to_plot_single_data['error'].values / 2
        upper = to_plot_single_data[
            y_col].values + to_plot_single_data['error'].values / 2
        ax.fill_between(to_plot_single_data[x_col],
                        lower,
                        upper,
                        color=color_palette[i],
                        alpha=0.2)

    plt.yscale('log')
    # handles, labels = ax.get_legend_handles_labels()
    prop = {'size': legend_size} if legend_size else {}
    ax.legend(loc=legend_loc, framealpha=1.0,
              prop=prop)  # handles=handles[1:], labels=labels[1:],
    plt.setp(ax.get_legend().get_texts(), fontsize='9')
    plt.show()
Example #13
def spatial_number_density(data_dict,
                           nside=128,
                           z_bin_step=0.5,
                           z_bin_size=0.5,
                           cosmo_model=cosmo_wmap9,
                           z_max=None,
                           legend_size=None):
    volume_proportion = (hp.nside2pixarea(nside, degrees=True) / 41253.0)

    fig, ax = plt.subplots()
    to_plot_df = pd.DataFrame()
    x_col = 'z'
    y_col = r'spatial density [N / comoving Mpc$^3$]'
    for data_name, (data, counts_map) in data_dict.items():
        z_column = 'Z' if 'Z' in data else 'Z_PHOTO'
        z_half_bin_size = z_bin_size / 2
        steps = np.arange(data[z_column].min() + z_half_bin_size,
                          data[z_column].max() + z_half_bin_size, z_bin_step)
        comoving_volumes = np.array([
            (cosmo_model.comoving_volume(step + z_half_bin_size) -
             cosmo_model.comoving_volume(step - z_half_bin_size)).value
            for step in steps
        ])
        mask_non_zero = np.nonzero(counts_map)
        print('{} area: {:.2f} deg^2'.format(
            data_name,
            len(mask_non_zero[0]) * hp.nside2pixarea(nside, degrees=True)))

        density_v_max_mean, density_v_max_error = [], []
        (ra_col, dec_col) = ('RAJ2000',
                             'DECJ2000') if ('RAJ2000' in data) else ('RA',
                                                                      'DEC')
        for i, step in enumerate(steps):
            data_step = data.loc[(data[z_column] > step - z_half_bin_size)
                                 & (data[z_column] < step + z_half_bin_size)]
            step_map, _, _ = get_map(data_step[ra_col],
                                     data_step[dec_col],
                                     v=data_step['v_weight'].values,
                                     nside=nside)
            v_max_values = step_map[mask_non_zero] / comoving_volumes[
                i] / volume_proportion
            (mu, sigma) = stats.norm.fit(v_max_values)
            density_v_max_mean.append(mu)
            density_v_max_error.append(sigma /
                                       math.sqrt(v_max_values.shape[0]))

        density_v_max_mean = np.array(density_v_max_mean)
        density_v_max_error = np.array(density_v_max_error)

        # comoving_v_max_densities = (density_v_max_mean / comoving_volumes / volume_proportion)

        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        to_plot_df = pd.concat([
            to_plot_df,
            pd.DataFrame({
                x_col: steps,
                y_col: density_v_max_mean,
                'error': density_v_max_error,
                'data name': [data_name] * len(steps),
            })
        ], ignore_index=True)

    color_palette = get_cubehelix_palette(
        len(data_dict), reverse=False) if len(data_dict) > 1 else [(0, 0, 0)]
    sns.lineplot(x=x_col,
                 y=y_col,
                 data=to_plot_df,
                 hue='data name',
                 palette=color_palette,
                 style='data name',
                 markers=True,
                 dashes=False)

    ax = plt.gca()
    for i, data_name in enumerate(to_plot_df['data name'].unique()):
        to_plot_single_data = to_plot_df.loc[to_plot_df['data name'] ==
                                             data_name]
        lower = to_plot_single_data[
            y_col].values - to_plot_single_data['error'].values / 2
        upper = to_plot_single_data[
            y_col].values + to_plot_single_data['error'].values / 2
        ax.fill_between(to_plot_single_data[x_col],
                        lower,
                        upper,
                        color=color_palette[i],
                        alpha=0.2)

    plt.xlim(right=z_max)
    plt.yscale('log')
    # handles, labels = ax.get_legend_handles_labels()
    prop = {'size': legend_size} if legend_size else {}
    ax.legend(loc='upper right', framealpha=1.0,
              prop=prop)  # handles=handles[1:], labels=labels[1:],
    plt.setp(ax.get_legend().get_texts(), fontsize='9')
    plt.show()
Example #14
def get_plan(initial_pose, goal_pose, counter):
    # Create a publisher to publish the initial pose
    init_pose_pub = rospy.Publisher(
        INIT_POSE_TOPIC, PoseWithCovarianceStamped,
        queue_size=1)  # to publish init position x=2500, y=640
    # Create a publisher to publish the goal pose
    goal_pose_pub = rospy.Publisher(
        GOAL_POSE_TOPIC, PoseStamped,
        queue_size=1)  # create a publisher for goal pose

    map_img, map_info = utils.get_map(MAP_TOPIC)  # Get and store the map
    PWCS = PoseWithCovarianceStamped()  # create a PoseWithCovarianceStamped() msg
    PWCS.header.stamp = rospy.Time.now()  # set header timestamp value
    PWCS.header.frame_id = "map"  # set header frame id value

    temp_pose = utils.map_to_world(initial_pose, map_info)  # init pose
    PWCS.pose.pose.position.x = temp_pose[0]
    PWCS.pose.pose.position.y = temp_pose[1]
    PWCS.pose.pose.position.z = 0
    PWCS.pose.pose.orientation = utils.angle_to_quaternion(
        temp_pose[2]
    )  # set msg orientation to the yaw angle converted to a quaternion
    print "Publishing Initial Pose to topic", INIT_POSE_TOPIC
    print "Initial ", PWCS.pose
    init_pose_pub.publish(PWCS)  # publish initial pose; add a "/initialpose" PoseWithCovariance display in rviz to see it

    if counter == 0:
        for i in range(0, 5):
            init_pose_pub.publish(PWCS)
            rospy.sleep(0.5)

    print "Initial Pose Set."
    # raw_input("Press Enter")

    PS = PoseStamped()  # create a PoseStamped() msg
    PS.header.stamp = rospy.Time.now()  # set header timestamp value
    PS.header.frame_id = "map"  # set header frame id value

    temp_pose = utils.map_to_world(goal_pose, map_info)  # goal pose
    PS.pose.position.x = temp_pose[0]  # x position from the look-ahead pose
    PS.pose.position.y = temp_pose[1]  # y position from the look-ahead pose
    PS.pose.position.z = 0  # set msg z position to 0 since robot is on the ground
    PS.pose.orientation = utils.angle_to_quaternion(temp_pose[2])
    print "Pubplishing Goal Pose to topic", GOAL_POSE_TOPIC
    print "\nGoal ", PS.pose
    goal_pose_pub.publish(PS)

    print "Goal Pose Set"
    # raw_input("Press Enter")

    print "\nwaiting for plan ", str(counter), "\n"
    raw_plan = rospy.wait_for_message(PLAN_POSE_ARRAY_TOPIC, PoseArray)
    print "\nPLAN COMPUTED! of type:", type(raw_plan)

    path_part_pub = rospy.Publisher(
        "/CoolPlan/plan" + str(counter), PoseArray,
        queue_size=1)  # create a publisher for plan lookahead follower
    for i in range(0, 4):
        path_part_pub.publish(raw_plan)
        rospy.sleep(0.4)

    return raw_plan
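Examples #14 and #15 fetch the map through utils.get_map(MAP_TOPIC). A plausible sketch of that helper, assuming the topic publishes a nav_msgs/OccupancyGrid; the real utils module may differ:

import numpy as np
import rospy
from nav_msgs.msg import OccupancyGrid

def get_map(map_topic):
    # Block until one map message arrives, then unpack it into a 2-D
    # occupancy array plus the MapMetaData that map_to_world consumes.
    msg = rospy.wait_for_message(map_topic, OccupancyGrid)
    map_img = np.array(msg.data, dtype=np.int8).reshape(msg.info.height, msg.info.width)
    return map_img, msg.info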
Example #15
    # set up waypoints and poses automatically; this does not always give
    # the most straightforward paths, so customized poses were used
    # instead

    #waypoints = calc_waypoint_orientation(order_waypoints(start_path, good_waypoints_path))

    start_pose = np.array([[2500, 640, 6.0]])

    waypoints = np.array([[2600, 660, 6.5], [2600, 450, 2.8], [1880, 440, 3.0],
                          [1699, 450, 4.5], [1590, 670, 3.3], [1490, 570, 1.85],
                          [1430, 490, 2.7], [1250, 460, 3.0], [1150, 460, 3.2],
                          [950, 480, 3.6], [600, 700, 3.9], [540, 835, 4.8]])

    plan = []
    map_img, map_info = utils.get_map(MAP_TOPIC)

    map_to_world(start_pose, map_info)
    map_to_world(waypoints, map_info)

    pp = PathPlanner(plan, waypoints)

    # offline plan location
    offline_plan_path = '/home/car-user/anptaszn/src/final/offline_data/output_plan.npy'

    # check if an offline plan already exists; if so, load it
    # instead of replanning
    if os.path.isfile(offline_plan_path):

        print "offline plan found"
        offline_plan = np.load(offline_plan_path)
Example #16
st.markdown('''
    ## Regional Risk Zone (Color) Situation

    * [Download Dataset](https://github.com/visiont3lab/project-work-ifoa/blob/main/data/dpc-covid19-ita-regioni-zone.csv)

    The dataset was produced with the notebook [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/visiont3lab/project-work-ifoa/blob/main/colab/AnalisiCovidRegioni.ipynb)
    starting from the [Civil Protection zone data](https://github.com/pcm-dpc/COVID-19/tree/master/aree)

''')

regione = st.selectbox("Select a region", nomi_regioni)
st.dataframe(utils.get_zone_table(regione))
map_btn = st.button("Show Map")
if map_btn:
    map_zone = utils.get_map()
    st.plotly_chart(map_zone)

st.markdown('''

Objective:

* Automatically collect the data corresponding to each region's color.
* Show on a map how the colors of the Italian regions change on a specific date.

The current color of each region is published on the Ministry of Health website: [Classification of the Regions and Autonomous Provinces,
updated 8 March](http://www.salute.gov.it/portale/nuovocoronavirus/dettaglioContenutiNuovoCoronavirus.jsp?area=nuovoCoronavirus&id=5351&lingua=italiano&menu=vuoto)
''')
# --------------------------------------------------------------------

# --------------------------------------------------------------------
Example #17
def experiments_squad_manual_check(device,
                                   data_partition="train",
                                   print_text=False,
                                   embd_type="useqa",
                                   label_type="gold",
                                   seed=0,
                                   epoch=1):
    def get_training_labels(label_binarizer, query_indices, fact_indices,
                            negative_indices):
        label_masks = list(set(query_indices + fact_indices +
                               negative_indices))
        label_masks_onehot = np.sum(label_binarizer.transform(label_masks),
                                    axis=0)

        labels = list(set(query_indices + fact_indices))
        labels_onehot = np.sum(label_binarizer.transform(labels), axis=0)

        return labels_onehot, label_masks_onehot, np.array(labels), np.array(
            label_masks)

    def get_loss(criterion, prediction, target, mask):
        loss = torch.sum(
            criterion(prediction, target) * mask) / torch.sum(mask)
        return loss

    probe_model_root_path = "data_generated/squad/probe_experiment_2020-05-30_215643/"
    input_type = "query_" + embd_type + "_embd"
    probe_model_path = probe_model_root_path + "query_" + embd_type + "_embd_" + label_type + "_result_seed_" + str(
        seed) + "/best_linear_prober"
    saved_data_folder = 'data_generated/squad/'

    train_list, dev_list, kb = utils_dataset_squad.load_squad_probe_raw_data()
    vocab_dict, tfidf_vectorizer = utils_probe_squad.get_vocabulary(
        train_list, kb, saved_data_folder + "squad_vocab_dict.pickle",
        saved_data_folder + "squad_tfidf_vectorizer.pickle")

    instances_all_seeds = utils_probe_squad.get_probe_dataset(
        train_list, dev_list, kb, "", vocab_dict, tfidf_vectorizer,
        saved_data_folder, "squad_probe.pickle")

    linear_probe = torch.load(probe_model_path).to(device)
    linear_probe.eval()
    criterion = nn.BCELoss(reduction="none")

    target_vocab_size = len(vocab_dict)
    label_binarizer = sklearn.preprocessing.LabelBinarizer()
    label_binarizer.fit(range(target_vocab_size))
    vocab_dict_rev = {v: k for k, v in vocab_dict.items()}

    query_indices = "lemma_query_indices_" + label_type
    fact_indices = "lemma_fact_indices_" + label_type
    negative_indices = "lemma_negative_indices_" + label_type

    data_list = instances_all_seeds[seed][data_partition]

    total_loss = 0
    map_list = list([])
    ppl_list = list([])
    query_map_list = list([])
    query_ppl_list = list([])
    target_map_list = list([])
    target_ppl_list = list([])

    pred_score_dict = {}
    target_occur_dict = {}
    with torch.no_grad():
        for i, instance in enumerate(data_list):
            labels_onehot, masks_onehot, labels, label_masks = get_training_labels(
                label_binarizer, instance[query_indices],
                instance[fact_indices], instance[negative_indices])

            output_ = linear_probe(
                instance[input_type].to(device))  # output size is (6600)
            output = torch.sigmoid(output_)  # nn.functional.sigmoid is deprecated

            if print_text:
                output_numpy = output.detach().cpu().numpy()
                top_preds = np.flip(np.argsort(output_numpy))
                print("=" * 20)
                print("\tquery:", instance["lemmas_query"])
                print("\tfact:", instance["lemmas_fact"])
                print('\ttop pred lemma:',
                      [vocab_dict_rev[idx] for idx in top_preds[:20]])
                input("A")

            loss = get_loss(
                criterion, output,
                torch.tensor(labels_onehot, dtype=torch.float32).to(device),
                torch.tensor(masks_onehot, dtype=torch.float32).to(device))

            total_loss += loss.detach().cpu().numpy()

            map_list.append(get_map(output.detach().cpu().numpy(), labels))
            ppl_list.append(get_ppl(output.detach().cpu().numpy(), labels))

            query_map_list.append(
                get_map(output.detach().cpu().numpy(),
                        np.array(instance[query_indices])))
            query_ppl_list.append(
                get_ppl(output.detach().cpu().numpy(),
                        np.array(instance[query_indices])))

            if len(set(instance[fact_indices]) -
                   set(instance[query_indices])) > 0:
                target_map_list.append(
                    get_map(
                        output.detach().cpu().numpy(),
                        np.array(list(
                            set(instance[fact_indices]) -
                            set(instance[query_indices])),
                                 dtype=np.int64)))
                target_ppl_list.append(
                    get_ppl(
                        output.detach().cpu().numpy(),
                        np.array(list(
                            set(instance[fact_indices]) -
                            set(instance[query_indices])),
                                 dtype=np.int64)))

            for pred_lemma_indices in list(
                    set(instance[fact_indices]) -
                    set(instance[query_indices])):
                pred_lemma = vocab_dict_rev[pred_lemma_indices]
                if pred_lemma not in pred_score_dict:
                    pred_score_dict[pred_lemma] = 0
                    target_occur_dict[pred_lemma] = 0
                target_occur_dict[pred_lemma] += 1
                pred_score_dict[pred_lemma] += output[pred_lemma_indices].item()

            if print_text:
                print("=" * 20)
                print("query:", instance["lemmas_query"])
                print("fact", instance["lemmas_fact"])
                print("negative", instance["lemmas_negative"])

                print("positive token reconstructed:",
                      [vocab_dict_rev[lemma_idx] for lemma_idx in labels])
                print("negative token reconstructed:", [
                    vocab_dict_rev[lemma_idx]
                    for lemma_idx in list(set(label_masks) - set(labels))
                ])
                print("query reconstructed", [
                    vocab_dict_rev[lemma_idx]
                    for lemma_idx in instance[query_indices]
                ])
                print("fact alone reconstructed:", [
                    vocab_dict_rev[lemma_idx]
                    for lemma_idx in instance[fact_indices]
                ])

                input("--------")

    result_dict = {
        "eval_loss": total_loss / len(data_list),
        "avg map": sum(map_list) / len(map_list),
        "avg ppl": sum(ppl_list) / len(ppl_list),
        "query map": sum(query_map_list) / len(query_map_list),
        "query ppl": sum(query_ppl_list) / len(query_ppl_list),
        "target map": sum(target_map_list) / len(target_map_list),
        "target ppl": sum(target_ppl_list) / len(target_ppl_list)
    }
    print("-" * 20)
    print(result_dict)

    print("-" * 20)
    pred_freq_dict_avg = {}
    for k in pred_score_dict.keys():
        pred_freq_dict_avg[k] = pred_score_dict[k] / target_occur_dict[k]

    tokens_sorted_by_occur = sorted(target_occur_dict.items(),
                                    key=lambda kv: kv[1])
    for histo_tuple in list(reversed(tokens_sorted_by_occur)):
        print("token:", histo_tuple[0], "\tn occur:", histo_tuple[1],
              "\tavg prob:", pred_freq_dict_avg[histo_tuple[0]])

    return 0
Example #18
    def __init__(self):
        '''
        constructor
        '''

        print('Driver constructor')


        self.scale = 0.05
        self.offset_x = 200 # 400
        self.offset_y = 200 # 400

        #Initialize ros node
        rospy.init_node('turtlebot_driver')
        
        #Starting position: in pixels
        self.x_start = rospy.get_param('cur_pos_x', self.offset_x)
        self.y_start = rospy.get_param('cur_pos_y', self.offset_y)
        self.theta_start = rospy.get_param('cur_pos_theta', 0)

        #Initialize goals
        self.x = np.array([])
        self.y = np.array([])
        self.theta = np.array([])

        
        # Thresholds for distance to goal
        self.goal_th_xy = rospy.get_param('goal_threshold_xy', 0.1)  # Position threshold
        self.goal_th_ang = rospy.get_param('goal_threshold_ang', 0.001)  # Orientation threshold
        
        #Point to the first goal
        self.active_goal = 0

        #Initialize number of goals
        self.num_goals = 1

        #Has the goal been loaded?
        self.params_loaded = False

        # For rviz plotting lines
        self.tfBroad = tf.TransformBroadcaster()
        self.i = 0 

       

        #Some parameters for LaserScan information
        self.sensor_spacing = 5 
        self.sensor_max_range = 10
        
        # Define publisher        
        self.pub = rospy.Publisher("/cmd_vel_mux/input/teleop", Twist, queue_size=100)
        self.pub_line = rospy.Publisher("lines", Marker,queue_size=0)     
        self.pub_map = rospy.Publisher("linesekf", Marker, queue_size=2)
        self.pub_traj = rospy.Publisher("trajectory", Marker, queue_size=2)

        # Define subscriber
        ##################################################################
        
        self.sub_sensor = rospy.Subscriber("/scan", LaserScan, self.scan_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)

        #define the velocity message
        self.vmsg = Twist()

        # Initialize robot's position (in meters)
        self.position_x = rospy.get_param('cur_pos_world_x', 0)
        self.position_y = rospy.get_param('cur_pos_world_y', 0)
        self.position_theta = rospy.get_param('cur_pos_world_theta', 0)

        #Controller parameters
        self.kp_v = 0.1
        self.kp_w = 1.5
        self.kd_v = 0
        self.kd_w = 0
        # self.ki_w = 0.001

        #Controller variables
        self.d_prev = 0
        self.dt_prev = 0
        
        # Obstacle avoidance factors
        self.oa_v = 1
        self.oa_w = 0
        #self.counter = 0

        # Publish map lines
        self.map = utils.get_map() # get_map_udg()
        self.trajectory = np.zeros((0, 4))