Example #1
    def get_extended_observation(self):
        self._observation = []

        if not self._ft_obs_only:
            if self.action_dim > 3:
                pos, orn = self.member_pose[0], self.member_pose[1]
                self._observation.extend(pos)
                self._observation.extend(orn)
            else:
                pos = self.member_pose[0]
                self._observation.extend(pos)

        self.force_torque = self.get_force_torque()

        if WRITE_CSV:
            util.write_csv([self._env_step_counter] + self.force_torque,
                           'ft_reading.csv', False)

        if self._limit_force_torque:
            self.check_ft_limit(self.force_torque)
        # if self._force_torque_violations != [0]*len(self.force_torque):
        # 	util.prRed(self._force_torque_violations)

        self._observation.extend(self.force_torque)

        return self._observation
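The calls above (and in Examples #3, #4 and #9) pass util.write_csv a single row, a file name, and a boolean flag, but the helper itself is not shown. A minimal sketch, assuming the flag selects truncate mode (used for header rows in Example #9) versus append mode:

import csv

def write_csv(row, filename, new_file=False):
    # Hypothetical reconstruction of util.write_csv: append one row,
    # truncating the file first when new_file is True (header rows).
    mode = 'w' if new_file else 'a'
    with open(filename, mode, newline='') as f:
        csv.writer(f).writerow(row)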
Example #2
def write_measurements_to_csv(name, measurements):
    rows = [['latitude', 'longitude', 'velocity', 'heading']] + [[
        measurement.latitude, measurement.longitude, measurement.velocity,
        measurement.heading
    ] for measurement in measurements]
    write_csv(name, rows)


# if (__name__ == "__main__"):
#     name = 'track_2018-03-14_134847'
#     gpx = load_gpx_file('data/{}.gpx'.format(name))
#     measurements = extract_gps_measurements(gpx)
#     write_measurements_to_csv(name, measurements)
#     for measurement in list(measurements)[:10]:
#         print(measurement.latitude, measurement.longitude, measurement.velocity, measurement.heading)
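This write_csv variant takes a track name plus the full list of rows, header included, rather than one row at a time. A minimal sketch, assuming (from the commented-out main block) that the name maps onto a data/<name>.csv path:

import csv

def write_csv(name, rows):
    # Hypothetical reconstruction: write all rows at once. The
    # 'data/{}.csv' path is an assumption mirroring load_gpx_file.
    with open('data/{}.csv'.format(name), 'w', newline='') as f:
        csv.writer(f).writerows(rows)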
Example #3
    def step(self, action):
        if len(action) > 3:
            delta_lin = np.array(action[0:3]) * self._max_vel * self._time_step
            delta_rot = np.array(action[3:6]) * self._max_rad * self._time_step
            delta = np.append(delta_lin, delta_rot)
        else:
            delta = np.array(action) * self._max_vel * self._time_step

        if self._limit_force_torque:
            self.constrain_velocity_for_ft(delta)

        if WRITE_CSV:
            util.write_csv([self._env_step_counter] + list(delta),
                           'data_out.csv', False)

        return self.step2(delta)
Example #4
    def pos_dist_to_target(self):
        self.member_pose = self.get_member_pose()

        # log
        if WRITE_CSV:
            # list() in case the pose entries are tuples, as the list()
            # calls below suggest (list + tuple would raise TypeError)
            util.write_csv([self._env_step_counter] +
                           list(self.member_pose[0]) +
                           list(self.member_pose[1]), 'member_pose.csv', False)

        member_pos = list(self.member_pose[0])
        target_pose = self.get_target_pose()
        target_pos = list(target_pose[0])

        dist_pos = np.linalg.norm(np.subtract(member_pos,
                                              target_pos))  # linear dist in m
        # util.prGreen("pos dist: {}".format(dist_pos))

        return dist_pos
Example #5
def write_pred_to_csv(file_names, model_preds, path="data/submission.csv"):
    csv_list = []
    for i in range(len(model_preds)):
        csv_row = ['', '']
        csv_row[0] = file_names[i]
        s = 'new_whale'  # start from 'new_whale', then append the ordered predictions, blank-separated
        for j in range(len(model_preds[i]) -
                       1):  # run over 5 ordered predictions
            # if j>0:
            s = s + ' '
            s = s + model_preds[i][j]
            # print("next_s", s)
        csv_row[1] = s
        csv_list.append(csv_row)
    # print("csv_list", csv_list)
    print("write csv file")
    ut.write_csv(csv_list, path)
    print("done writing csv file")
Example #6
def main():
    # create the base pre-trained model
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer
    num_classes = len(os.listdir(INPUT_DIRECTORY))
    predictions = Dense(num_classes, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

    # define image generator
    train_gen = image.ImageDataGenerator()

    # train the model on the new data for a few epochs
    model.fit_generator(train_gen.flow_from_directory(INPUT_DIRECTORY),
                        steps_per_epoch=3,
                        epochs=1,
                        verbose=2)

    # let's predict the test set to see a rough score
    labels = make_label_dict()
    test_gen = image.ImageDataGenerator()
    flow = test_gen.flow_from_directory(INPUT_DIRECTORY, class_mode=None)
    predictions = model.predict_generator(flow, verbose=1)  # steps=15611//32)
    top_k = predictions.argsort()[:, -4:][:, ::-1]
    classes = [" ".join([labels[i] for i in line]) for line in top_k]
    filenames = flow.filenames  # [os.path.basename(f) for f in flow.filenames]
    csv_list = zip(filenames, classes)
    write_csv(csv_list, file_name=OUTPUT_FILE)
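The top-4 line here is worth unpacking: argsort puts the highest-scoring class indices last, the [:, -4:] slice keeps the four best, and [:, ::-1] flips them into descending order. A standalone illustration:

import numpy as np

scores = np.array([[0.1, 0.5, 0.2, 0.15, 0.05]])
top_4 = scores.argsort()[:, -4:][:, ::-1]
print(top_4)  # [[1 2 3 0]] -- class indices, best score first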
Example #7
a = 0
for action in actions:
    a += 1
    associated = []
    f = 0
    for filename in iterate_directory(
            '/home/user/Projects/Python/Recipes/Cookbook'):
        f += 1
        for step in get_steps(
                get_lines('/home/user/Projects/Python/Recipes/Cookbook/' +
                          filename)):
            s = 0
            for sentence in get_sentences(step):
                s += 1
                if action in get_words(sentence):
                    for word in get_words(sentence):
                        for ingredient in ingredients:
                            if ingredient in word or word == ingredient:
                                associated.append(ingredient)
                print('Action: ' + str(a) + '		File: ' + str(f) +
                      '		Sentence: ' + str(s),
                      end='\r')

    occurance = {}
    try:
        occurance = get_occurances(associated)
    except Exception:
        occurance['all'] = 0
    write_csv('information/associations/' + action + '.csv', occurance)
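get_occurances is not defined in this snippet; judging by how its result is used (a dict of counts per ingredient), it is presumably a frequency count over the collected associations. A minimal sketch with collections.Counter, where the empty-input ValueError is an assumption made to match the try/except fallback above:

from collections import Counter

def get_occurances(associated):
    # Hypothetical reconstruction: count how often each ingredient
    # co-occurred with the current action.
    if not associated:
        raise ValueError('no associations collected')
    return dict(Counter(associated))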
Example #8
usleg_db = client["US_LEGISLATOR"]
usleg_db.authenticate("readonly", "smappnyu")

# Query for tweets - more complex than we're used to. Using AND.
# (NOTE that mongo implicitly ANDs multiple comma-separated search clauses;
# however, two clauses on the same field with the same operator must be combined with $and)
start = datetime(2013, 8, 21)
end = datetime(2013, 9, 7)

results = usleg_db.legislator_tweets.find({
    "timestamp": {"$gte": start, "$lt": end},
    "$and": [
        {"text": {"$regex": "syria", "$options": "i"}},
        {"text": {"$regex": "interven", "$options": "i"}}
    ]
})

print "Found {0} tweets on topic. Writing to CSV file".format(
	results.count(with_limit_and_skip=True))

write_csv(results, "usleg-syria-intervene.csv")

Example #9
    def __init__(self,
                 time_step=None,
                 max_steps=None,
                 step_limit=None,
                 action_dim=None,
                 max_vel=None,
                 max_rad=None,
                 ft_obs_only=None,
                 limit_ft=None,
                 max_ft=None,
                 max_position_range=None,
                 dist_threshold=None):
        super().__init__()

        self._max_step = max_steps
        self._step_limit = step_limit
        # max linear and rotational velocity command
        self._max_vel = max_vel
        self._max_rad = max_rad
        # only use force torque as observation
        self._ft_obs_only = ft_obs_only

        self._time_step = time_step
        self._observation = []
        self._env_step_counter = 0
        self._num_success = 0

        self._limit_force_torque = limit_ft
        self._max_force_torque = max_ft
        self._force_torque_violations = [0.0] * len(self._max_force_torque)
        self._ft_range_ratio = 1

        # define Gym spaces for observations and actions
        self._max_pos_range = max_position_range
        if self._ft_obs_only:  # no pose observation
            self.observation_dim = len(self._max_force_torque)
            observation_high = np.array(self._max_force_torque)
            observation_low = -observation_high
        elif action_dim == 6:  # 6 DOF
            self.observation_dim = 7 + len(self._max_force_torque)
            observation_orn_high = [1] * 4
            observation_high = np.array(self._max_pos_range +
                                        observation_orn_high +
                                        self._max_force_torque)
            observation_low = -observation_high
        else:  # 3 DOF
            self.observation_dim = 3 + len(self._max_force_torque)
            observation_high = np.array(self._max_pos_range +
                                        self._max_force_torque)
            observation_low = -observation_high
        self.observation_space = spaces.Box(observation_low, observation_high)

        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
        self.action_dim = action_dim

        self.member_pose = []
        self.force_torque = []

        self.dist_threshold = dist_threshold

        # csv headers
        if WRITE_CSV:
            util.write_csv([
                "step_member_pose", "pos_X", "pos_Y", "pos_Z", "qX", "qY",
                "qZ", "qW"
            ], 'member_pose.csv', True)
            util.write_csv(["step_ft", "Fx", "Fy", "Fz", "Tx", "Ty", "Tz"],
                           'ft_reading.csv', True)
            if self.action_dim == 3:
                util.write_csv(["step_actions", "vel_X", "vel_Y", "vel_Z"],
                               'data_out.csv', True)
            else:
                util.write_csv([
                    "step_actions", "vel_X", "vel_Y", "vel_Z", "rot_vel_X",
                    "rot_vel_Y", "rot_vel_Z"
                ], 'data_out.csv', True)
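The observation and action spaces above are plain gym.spaces.Box instances; newer Gym releases also want an explicit dtype. A small standalone sketch of how such an action space behaves:

import numpy as np
from gym import spaces

action_dim = 6
action_high = np.array([1.0] * action_dim)
action_space = spaces.Box(-action_high, action_high, dtype=np.float32)

action = action_space.sample()      # random action in [-1, 1]^6
assert action_space.contains(action)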
Example #10
def main():
    stocks = config.stocks
    get_data_csv_name = config.get_data_csv_name
    db_csv_name = config.db_csv_name

    subprocess.call(["rm", get_data_csv_name])
    subprocess.call(["rm", db_csv_name])

    number = 1

    for stock_index in range(len(stocks) - 1):
        qiita_api = os.environ['QIITA_API']
        url = "https://qiita.com/api/v2/items"
        h = {"Authorization": "Bearer " + qiita_api}
        p = {
            'per_page': 100,
            'query': 'stocks:<{} stocks:>{}'.format(
                str(int(stocks[stock_index]) + 1), stocks[stock_index + 1])
        }
        response = requests.get(url, params=p, headers=h)
        response_list = json.loads(response.text)
        for item in response_list:
            created_at = item["created_at"]
            article_id = item["id"]
            likes_count = item["likes_count"]
            tags = []
            for tag_index in range(5):
                try:
                    tags.append(item["tags"][tag_index]["name"])
                except IndexError:
                    tags.append(None)
            title = item["title"]
            updated_at = item["updated_at"]
            url = item["url"]
            user_id = item["user"]["id"]
            number = utilities.write_csv(get_data_csv_name, number, article_id,
                                         user_id, title, likes_count, url,
                                         tags, created_at, updated_at)

    utilities.delete_csv_row(get_data_csv_name, db_csv_name)

    dt_now = datetime.datetime.now()
    with open('update_log.txt', 'a') as log_file:
        log_file.write(str(dt_now) + "\n")

    conn = utilities.get_connection()
    cur = conn.cursor()
    cur.execute('DELETE FROM update_time')
    cur.execute('INSERT INTO update_time VALUES (' + str(dt_now.year) + ',' +
                str(dt_now.month) + ',' + str(dt_now.day) + ')')
    cur.execute('DELETE FROM articles')

    with open('db.csv', 'r') as f:
        cur.copy_from(f, 'articles', sep=',', null='\\N')
    conn.commit()
    cur.close()
    conn.close()
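One caveat on the COPY step: cur.copy_from(f, 'articles', sep=',') uses PostgreSQL's text format, which does not understand CSV quoting, so a field containing a comma or newline will break the load. If db.csv is real CSV, psycopg2's copy_expert with the CSV option is the safer call; a sketch of a drop-in replacement for the copy_from block above (while cur is still open):

with open('db.csv', 'r') as f:
    cur.copy_expert("COPY articles FROM STDIN WITH (FORMAT csv)", f)
conn.commit()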
Example #11
for item in all_items:
    try:
        for file in iterate_directory(
                '/home/user/Projects/Python/Recipes/Cookbook'):
            ings = generate_ingredients(
                '/home/user/Projects/Python/Recipes/Cookbook/' + file)
            r = recipe(ings)
            if item in r.items:
                for it in r.items:
                    total_associations.append(it)
            with open('log_connect.txt', 'w') as log:
                log.write('item: ' + item + ', file: ' + file +
                          ', associations: ' + str(len(total_associations)))

        occurrences = {}
        if len(total_associations) > 0:
            occurrences = get_occurances(total_associations)

        name = '_'.join(item.strip(' "\'').split(' ')) + '.csv'

        if len(total_associations) < 1:
            for other_item in all_items:
                occurrences[other_item] = 0

        write_csv('information/associations/' + name, occurrences)
    except Exception:
        pass  # skip items whose recipes could not be processed
Example #12
#path3.tolist()
#print(path3)
kf = KFold(n_splits=5)
kf2 = StratifiedKFold(n_splits=5, shuffle=True)
i = 0

for train, test in kf2.split(path3, label):
    #print(test)
    #print(test.shape)
    example = path3[test]
    example = list(example)
    print(example)

    FILENAME = LABEL_PATH + '/fold_' + str(i) + '.csv'
    UT.write_csv(FILENAME, example)

    #FILENAME = LABEL_PATH + '/fold_' + str(i) + '.csv'
    df = pd.read_csv(FILENAME, header=None)

    data = df.values
    data = list(map(list, zip(*data)))
    data = pd.DataFrame(data)
    data.to_csv(FILENAME, header=False, index=False)
    i = i + 1

    #print(example)

filenames = [
    LABEL_PATH + '/fold_0.csv', LABEL_PATH + '/fold_1.csv',
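The round-trip in the fold loop above (read the fold file back, then list(map(list, zip(*data)))) is a plain transpose: zip(*rows) regroups the rows column by column before the file is rewritten. A standalone illustration:

rows = [[1, 2, 3], [4, 5, 6]]
transposed = list(map(list, zip(*rows)))
print(transposed)  # [[1, 4], [2, 5], [3, 6]]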
Example #13
usleg_db.authenticate("readonly", "smappnyu")

# Query for tweets - more complex than we're used to. Using AND.
# (NOTE that mongo implicitly ANDs multiple comma-separated search clauses;
# however, two clauses on the same field with the same operator must be combined with $and)
start = datetime(2013, 8, 21)
end = datetime(2013, 9, 7)

results = usleg_db.legislator_tweets.find({
    "timestamp": {
        "$gte": start,
        "$lt": end
    },
    "$and": [{
        "text": {
            "$regex": "syria",
            "$options": "i"
        }
    }, {
        "text": {
            "$regex": "interven",
            "$options": "i"
        }
    }]
})

print "Found {0} tweets on topic. Writing to CSV file".format(
    results.count(with_limit_and_skip=True))

write_csv(results, "usleg-syria-intervene.csv")
Example #14
for item in all_items:
    try:
        all_props = {}
        for file in iterate_directory(
                '/home/user/Projects/Python/Recipes/Cookbook'):
            temp = {}
            with open('log_prop.txt', 'w') as log:
                log.write('item: ' + item + ', file: ' + file)
            ingredients = generate_ingredients(
                '/home/user/Projects/Python/Recipes/Cookbook/' + file)
            rec = recipe(ingredients)
            if item in rec.items:
                for food in rec.items:
                    temp[food] = rec.items[item] / (rec.items[food] + 0.000001)
            else:
                for food in rec.items:
                    temp[food] = 0
            for food in temp:
                if food not in all_props:
                    all_props[food] = temp[food]
                else:
                    if temp[food] != 0:
                        all_props[food] = (all_props[food] + temp[food]) / 2
        name = '_'.join(item.strip(' "\'').split(' ')) + '.csv'
        write_csv('information/proportions/' + name, all_props)
        print('done')
    except Exception:
        pass  # skip items whose recipes could not be processed
Example #15
    # input_data_file = APP_DIR + '/data/input/debug.txt'
    input_data_file = APP_DIR + '/data/input/word_list.txt'
    input_word_list = load_input_word_list(input_data_file)
    load_vn_dict()
    vn_dict = VNDict.get_instance()

    # analyze
    results = {WordTypeEnum.VERB: [], WordTypeEnum.ADJ: []}

    for input_word in input_word_list:
        words_were_found = vn_dict.look_up(input_word.txt,
                                           w_kind=input_word.kind)
        if not words_were_found:
            continue

        nearest_word = find_nearest_word(input_word, words_were_found)
        results[nearest_word.type].append(nearest_word)

    if Setting.GREEDY:
        additional_verbs, additional_adjectives = greedy(
            results, Setting.GREEDY_ALGORITHMS)
        results[WordTypeEnum.VERB] = results[WordTypeEnum.VERB] + additional_verbs
        results[WordTypeEnum.ADJ] = results[WordTypeEnum.ADJ] + additional_adjectives

    write_csv(APP_DIR + '/data/output/{}.txt'.format(WordTypeEnum.VERB.name),
              results[WordTypeEnum.VERB])
    write_csv(APP_DIR + '/data/output/{}.txt'.format(WordTypeEnum.ADJ.name),
              results[WordTypeEnum.ADJ])