Example #1
def load_data(file_name, shuffle=True, verbose=False):
    global CLASSES
    file_sentences = []
    file_labels = []
    file_ids = []

    for label, c in enumerate(CLASSES):
        if verbose: print('reading ' + file_name + '.' + c)
        with open('%s.%s' % (file_name, c), 'r', encoding='utf8') as f:
            for line in f:
                sentence = line[19:].strip()
                if len(sentence) > 1:
                    file_sentences.append(sentence)
                    file_ids.append(int(line[:18].strip()))
                    file_labels.append(label)
                else:
                    if verbose: print('One empty line.')
    file_data = [file_sentences, file_labels, file_ids]
    if shuffle:
        file_indexes = list(range(len(file_sentences)))
        shuffle_data = lambda v, ind: [v[i] for i in ind]
        shuffle_list(file_indexes)
        for i in range(len(file_data)):
            file_data[i] = shuffle_data(file_data[i], file_indexes)
    if verbose: print('Done reading.\n')
    return file_data
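Every snippet on this page calls a shuffle_list helper whose definition is not part of the listings; most call it for its in-place effect. A minimal sketch of what that usage assumes (essentially a thin wrapper around random.shuffle; the definition is illustrative, not the project's actual code):

import random

def shuffle_list(items):
    """Shuffle a mutable sequence in place (thin wrapper around random.shuffle)."""
    random.shuffle(items)

# usage mirroring the index shuffle in load_data above
file_indexes = list(range(10))
shuffle_list(file_indexes)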
Example #2
def gen_puzzles():
    all_ids = [puzzle.id for puzzle in Puzzles.query.all()]
    pids = []

    while len(pids) != POSSIBLE_COMPLETED:
        pid = choose_random(all_ids)
        if pid not in pids:
            pids.append(pid)
        shuffle_list(all_ids)

    return pids
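choose_random and POSSIBLE_COMPLETED are not defined in this snippet. A minimal sketch under the assumption that choose_random returns one random element and POSSIBLE_COMPLETED is a module-level constant (the definitions below are stand-ins, not the project's code):

import random

POSSIBLE_COMPLETED = 5  # hypothetical: number of puzzle ids to draw

def choose_random(items):
    # stand-in helper; assumed to return a single random element
    return random.choice(items)

Under those assumptions, random.sample(all_ids, POSSIBLE_COMPLETED) would yield an equivalent set of unique ids in one call, provided POSSIBLE_COMPLETED does not exceed len(all_ids).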
Example #3
    def play(self):
        shuffle_list(self.players)

        print('Playing order has been chosen:')
        for i, player in enumerate(self.players, start=1):
            print(f"Player {i} will be {player.name}.")

        winner = False
        while not winner:

            plays = list()  # [player.play(0) for player in self.players]
            for player in self.players:
                print(f"{player.name}, it's your turn.")
                if len(player.won_cards) > 0:
                    print("You have these cards in your stash:")
                    for card in player.won_cards:
                        print(card)
                print("You have these cards on your hand:")
                plays.append(player.play(self.choose(player.hand)))

            for i, player in enumerate(self.players):
                print(f"{player.name} plays a {plays[i]} ")

            if plays[0] > plays[1]:
                print(f"{self.players[0].name} wins the round!")
                self.players[0].add_won_card(plays[0])
            elif plays[0] < plays[1]:
                print(f"{self.players[1].name} wins the round!")
                self.players[1].add_won_card(plays[1])
                self.players = self.swap_list(self.players)
            else:
                # TODO: A draw could result in the next winner gets to
                # TODO: choose either card from the draw pile
                print('It was a draw! Both cards are discarded.')

            for player in self.players:
                if player.winner():
                    print(f"{player.name} won these cards:")
                    print(f"{player.wins()}")
                    print(f"The winner is {player.name}")
                    winner = True

            if not winner and len(self.deck.cards) == 0 and len(
                    self.players[0].hand) == 0:
                winner = True
                print(f"There is no winner. What even is the purpose?")

            if len(self.deck.cards) > 1:
                for player in self.players:
                    player.draw(self.deck)
Example #4
def _chunk_tensor(
    x: Tuple[Optional[torch.Tensor], ...],
    y: torch.Tensor,
    num_sections: int,
    shuffle: bool,
) -> Iterable[Tuple[Tuple[Optional[torch.Tensor], ...], torch.Tensor]]:
    split_x = []
    for idx, x_section in enumerate(x):
        if x_section is not None:
            split_x.append(torch.chunk(x_section, num_sections))
        else:
            split_x.append([None] * num_sections)  # type: ignore
    split_y = torch.chunk(y, num_sections)
    return_arrays = list(zip(*split_x, split_y))

    if shuffle:
        shuffle_list(return_arrays)
    return [(chunk[:-1], chunk[-1]) for chunk in return_arrays]  # type: ignore
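A hedged usage sketch for _chunk_tensor, assuming torch is installed and the function above is in scope; the shapes are illustrative only:

import torch

x = (torch.randn(8, 3), None)   # one feature tensor plus an intentionally missing slot
y = torch.arange(8)

for x_chunk, y_chunk in _chunk_tensor(x, y, num_sections=4, shuffle=False):
    # each x_chunk is a tuple aligned with x; None slots stay None in every chunk
    print([t.shape if t is not None else None for t in x_chunk], y_chunk.shape)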
Example #5
def _chunk_ndarray(
    x: Tuple[Optional[np.ndarray], ...],
    y: np.ndarray,
    num_sections: int,
    shuffle: bool,
) -> Iterable[Tuple[Tuple[Optional[np.ndarray], ...], np.ndarray]]:

    split_x = []
    for idx, x_section in enumerate(x):
        if x_section is not None:
            split_x.append(np.array_split(x_section, num_sections))
        else:
            split_x.append([None] * num_sections)
    split_y = np.array_split(y, num_sections)
    return_arrays = list(zip(*split_x, split_y))

    if shuffle:
        shuffle_list(return_arrays)
    return [(chunk[:-1], chunk[-1]) for chunk in return_arrays]  # type: ignore
Example #6
    def __init__(self, data, targets, length, start_index=0,
                 shuffle=False, reverse=False, batch_size=128):
        super().__init__(data, targets, length, start_index=start_index,
                         shuffle=shuffle, reverse=reverse,
                         end_index=len(data[0]), batch_size=batch_size)
        assert isinstance(data, list), 'data must be list of timeseries'
        # convert any pandas DataFrames to plain numpy arrays, leaving
        # array-like entries untouched
        self.data = [i.values if isinstance(i, pd.DataFrame) else i
                     for i in self.data]
        self.targets = [i.values if isinstance(i, pd.DataFrame) else i
                        for i in self.targets]
        if self.shuffle:
            zippd = list(zip(self.data, self.targets))
            shuffle_list(zippd)  # inplace operation
            self.data, self.targets = list(zip(*zippd))
        # start index is the same for each profile
        # for each profile there's a different end_index
        self.end_index = [len(d)-1 for d in self.data]

        batches_per_profile = [(e - self.start_index + self.batch_size)//
                               self.batch_size for e in self.end_index]
        self.data_len = sum(batches_per_profile)
        self.batch_cumsum = np.cumsum(batches_per_profile)
Example #7
    def reset(self):
        self._next_counter = 0
        if not self._random_distribution:
            shuffle_list(self._indices)
Example #8
    def next_song(self):
        if self.shuffle:
            # note: this assumes shuffle_list returns the shuffled list; an
            # in-place shuffle (random.shuffle style) would return None here
            self._songs = shuffle_list(self._songs)

        ...
Example #9
def build_tfrecord_single(conf,
                          mode='train',
                          input_files=None,
                          shuffle=True,
                          buffersize=512):
    """Create input tfrecord tensors.

    Args:
      training: training or validation data_files.
      conf: A dictionary containing the configuration for the experiment
    Returns:
      list of tensors corresponding to images, actions, and states. The images
      tensor is 5D, batch x time x height x width x channels. The state and
      action tensors are 3D, batch x time x dimension.
    Raises:
      RuntimeError: if no files found.
    """
    sdim = conf.get('sdim', 3)
    adim = conf.get('adim', 4)
    print('adim', adim)
    print('sdim', sdim)

    if input_files is not None:
        if not isinstance(input_files, list):
            filenames = [input_files]
        else:
            filenames = input_files
    else:
        filenames = gfile.Glob(os.path.join(conf['data_dir'], mode) + '/*')
        # only shuffle the file list for training data
        shuffle = mode not in ('val', 'test')
        if not filenames:
            raise RuntimeError('No data files found.')

    print('using shuffle: ', shuffle)
    if shuffle:
        shuffle_list(filenames)
    # Reads an image from a file, decodes it into a dense tensor, and resizes it
    # to a fixed shape.
    def _parse_function(serialized_example):
        image_seq, image_main_seq, endeffector_pos_seq, gen_images_seq, gen_states_seq,\
        action_seq, object_pos_seq, robot_pos_seq, goal_image = [], [], [], [], [], [], [], [], []

        load_indx = list(range(0, conf['sequence_length'], conf['skip_frame']))
        print('using frame sequence: ', load_indx)

        rand_h = tf.random_uniform([1], minval=-0.2, maxval=0.2)
        rand_s = tf.random_uniform([1], minval=-0.2, maxval=0.2)
        rand_v = tf.random_uniform([1], minval=-0.2, maxval=0.2)
        features_name = {}

        for i in load_indx:
            image_names = []
            if 'view' in conf:
                cam_ids = [conf['view']]
            else:
                if 'ncam' in conf:
                    ncam = conf['ncam']
                else:
                    ncam = 1
                cam_ids = range(ncam)

            for icam in cam_ids:
                image_names.append(
                    str(i) + '/image_view{}/encoded'.format(icam))
                features_name[image_names[-1]] = tf.FixedLenFeature([1],
                                                                    tf.string)

            if 'image_only' not in conf:
                action_name = str(i) + '/action'
                endeffector_pos_name = str(i) + '/endeffector_pos'

            if 'image_only' not in conf:
                features_name[action_name] = tf.FixedLenFeature([adim],
                                                                tf.float32)
                features_name[endeffector_pos_name] = tf.FixedLenFeature(
                    [sdim], tf.float32)

            if 'test_metric' in conf:
                robot_pos_name = str(i) + '/robot_pos'
                object_pos_name = str(i) + '/object_pos'
                features_name[robot_pos_name] = tf.FixedLenFeature(
                    [conf['test_metric']['robot_pos'] * 2], tf.int64)
                features_name[object_pos_name] = tf.FixedLenFeature(
                    [conf['test_metric']['object_pos'] * 2], tf.int64)

            if 'load_vidpred_data' in conf:
                gen_image_name = str(i) + '/gen_images'
                gen_states_name = str(i) + '/gen_states'
                features_name[gen_image_name] = tf.FixedLenFeature([1],
                                                                   tf.string)
                features_name[gen_states_name] = tf.FixedLenFeature([sdim],
                                                                    tf.float32)

            features = tf.parse_single_example(serialized_example,
                                               features=features_name)

            images_t = []
            for image_name in image_names:
                image = decode_im(conf, features, image_name)

                if 'color_augmentation' in conf:
                    # print 'performing color augmentation'
                    image_hsv = tf.image.rgb_to_hsv(image)
                    img_stack = [
                        tf.unstack(imag, axis=2)
                        for imag in tf.unstack(image_hsv, axis=0)
                    ]
                    stack_mod = [
                        tf.stack([x[0] + rand_h, x[1] + rand_s, x[2] + rand_v],
                                 axis=2) for x in img_stack
                    ]

                    image_rgb = tf.image.hsv_to_rgb(tf.stack(stack_mod))
                    image = tf.clip_by_value(image_rgb, 0.0, 1.0)
                images_t.append(image)

            image_seq.append(tf.stack(images_t, axis=1))

            if 'image_only' not in conf:
                endeffector_pos = tf.reshape(features[endeffector_pos_name],
                                             shape=[1, sdim])
                endeffector_pos_seq.append(endeffector_pos)
                action = tf.reshape(features[action_name], shape=[1, adim])
                action_seq.append(action)

            if 'test_metric' in conf:
                robot_pos = tf.reshape(features[robot_pos_name], shape=[1, 2])
                robot_pos_seq.append(robot_pos)

                object_pos = tf.reshape(
                    features[object_pos_name],
                    shape=[1, conf['test_metric']['object_pos'], 2])
                object_pos_seq.append(object_pos)

            if 'load_vidpred_data' in conf:
                gen_images_seq.append(decode_im(conf, features, gen_image_name))
                gen_states = tf.reshape(features[gen_states_name],
                                        shape=[1, sdim])
                gen_states_seq.append(gen_states)

        return_dict = {}
        image_seq = tf.concat(values=image_seq, axis=0)
        image_seq = tf.squeeze(image_seq)
        if 'use_cam' in conf:
            image_seq = image_seq[:, conf['use_cam']]
        return_dict['images'] = image_seq

        if 'goal_image' in conf:
            features_name = {}
            features_name['/goal_image'] = tf.FixedLenFeature([1], tf.string)
            features = tf.parse_single_example(serialized_example,
                                               features=features_name)
            goal_image = tf.squeeze(decode_im(conf, features, '/goal_image'))
            return_dict['goal_image'] = goal_image

        if 'first_last_noarm' in conf:
            features_name = {}
            features_name['/first_last_noarm0'] = tf.FixedLenFeature([1],
                                                                     tf.string)
            features = tf.parse_single_example(serialized_example,
                                               features=features_name)
            first_last_noarm0 = tf.squeeze(
                decode_im(conf, features, '/first_last_noarm0'))
            features_name['/first_last_noarm1'] = tf.FixedLenFeature([1],
                                                                     tf.string)
            features = tf.parse_single_example(serialized_example,
                                               features=features_name)
            first_last_noarm1 = tf.squeeze(
                decode_im(conf, features, '/first_last_noarm1'))
            return_dict['first_last_noarm'] = tf.stack(
                [first_last_noarm0, first_last_noarm1], axis=0)

        if 'image_only' not in conf:
            if 'no_touch' in conf:
                return_dict['endeffector_pos'] = tf.concat(
                    endeffector_pos_seq, 0)[:, :-2]
            else:
                return_dict['endeffector_pos'] = tf.concat(
                    endeffector_pos_seq, 0)

            if 'autograsp' in conf:
                return_dict['actions'] = tf.concat(action_seq, 0)[:, :-1]
            else:
                return_dict['actions'] = tf.concat(action_seq, 0)

        if 'load_vidpred_data' in conf:
            return_dict['gen_images'] = gen_images_seq
            return_dict['gen_states'] = gen_states_seq

        return return_dict

    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(_parse_function)

    if 'max_epoch' in conf:
        dataset = dataset.repeat(conf['max_epoch'])
    else:
        dataset = dataset.repeat()

    if shuffle:
        dataset = dataset.shuffle(buffer_size=buffersize)
    dataset = dataset.batch(conf['batch_size'])
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    output_element = {}
    for k in list(next_element.keys()):
        output_element[k] = tf.reshape(
            next_element[k],
            [conf['batch_size']] + next_element[k].get_shape().as_list()[1:])

    return output_element
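A hedged sketch of how build_tfrecord_single might be called; the configuration below covers only the keys the function requires when no explicit input_files are passed, and the directory path is a placeholder:

conf = {
    'data_dir': '/path/to/tfrecords',  # expects per-mode subdirectories, e.g. train/, val/
    'sequence_length': 15,
    'skip_frame': 1,
    'batch_size': 16,
    # optional keys read elsewhere in the function: 'sdim', 'adim', 'ncam',
    # 'view', 'image_only', 'color_augmentation', 'use_cam', 'max_epoch', ...
}
tensors = build_tfrecord_single(conf, mode='train')
images = tensors['images']    # batch x time x height x width x channels
actions = tensors['actions']  # batch x time x adim (absent when 'image_only' is set)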
Example #10
    def regulariza(self,
                   G,
                   sentence_nodes,
                   train_labels,
                   path_out,
                   total_pre_anotados=0.3,
                   method='gfhf'):
        """
            gerar as features de regularização
        """
        total_samples = len(train_labels)
        cods = list(range(total_samples))
        shuffle_list(cods)

        anotados = cods[0:int(total_samples * total_pre_anotados)]

        train = 'train_%d'
        test = 'test_%d'

        keys_anotados = []
        for i in range(total_samples):
            if i in anotados:
                G.nodes[sentence_nodes[train % i]]['label'] = train_labels[i]
                keys_anotados.append(train % i)

        with open('anotados.txt', 'w') as f:
            for i in anotados:
                f.write('%d\n' % i)

        labels = ['p', 'nao_p']
        columns = ['id', labels[0], labels[1], 'classe']
        rows_train = []
        rows_test = []

        if method == 'gfhf':
            F = harmonic_function(G)
        elif method == 'llgc':
            F = local_and_global_consistency(G)

        if method in ['gfhf', 'llgc']:
            for key in sentence_nodes.keys():
                id_node = sentence_nodes[key]
                split_key_node = key.split('_')
                t = split_key_node[0]
                cod = int(split_key_node[1])
                if t == 'train':
                    rows_train.append([
                        id_node, F[id_node][0], F[id_node][1],
                        train_labels[cod]
                    ])

                if t == 'test':
                    rows_test.append([id_node, F[id_node][0], F[id_node][1]])
        elif method == 'gnetmine':
            M = GNetMine(graph=G)
            c = M.run()
            F = M.f['sentence_pair']
            labels = M.labels
            nodes = M.nodes_type['sentence_pair']
            dict_nodes = {k: i for i, k in enumerate(nodes)}
            for key in sentence_nodes.keys():
                id_node = sentence_nodes[key]
                split_key_node = key.split('_')
                t = split_key_node[0]
                cod = int(split_key_node[1])
                if t == 'train':
                    rows_train.append([
                        id_node, F[dict_nodes[id_node]][0],
                        F[dict_nodes[id_node]][1], train_labels[cod]
                    ])

                if t == 'test':
                    rows_test.append([
                        id_node, F[dict_nodes[id_node]][0],
                        F[dict_nodes[id_node]][1]
                    ])

        file_name_train = 'features_%s_pre_anotados_train.csv' % len(anotados)
        file_name_test = 'features_%s_pre_anotados_test.csv' % len(anotados)
        df_train = pd.DataFrame(rows_train, columns=columns)
        df_test = pd.DataFrame(rows_test, columns=columns[:3])
        df_train.to_csv(path_out + file_name_train, index=False)
        df_test.to_csv(path_out + file_name_test, index=False)
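The pre-annotation split at the top of regulariza shuffles the training indices, keeps the first total_pre_anotados fraction, and seeds those graph nodes with their known labels. A minimal standalone sketch of that step, with random.shuffle standing in for shuffle_list and toy data in place of the real arguments:

import random
import networkx as nx

train_labels = ['p', 'nao_p', 'p', 'nao_p', 'p', 'p']  # toy labels
sentence_nodes = {'train_%d' % i: i for i in range(len(train_labels))}
G = nx.Graph()
G.add_nodes_from(sentence_nodes.values())

total_samples = len(train_labels)
cods = list(range(total_samples))
random.shuffle(cods)                          # stand-in for shuffle_list
anotados = cods[:int(total_samples * 0.3)]    # ~30% pre-annotated indices

for i in anotados:
    # seed the graph with the known labels, as the loop over range(total_samples) does
    G.nodes[sentence_nodes['train_%d' % i]]['label'] = train_labels[i]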