Example #1
def read_cnnHAR(filename_queue):
    class CNNHARRecord(object):
        pass

    result = CNNHARRecord()

    # Read one line per record from the files in filename_queue; each
    # record is a CSV row holding the label followed by the signal values.
    reader = tf.TextLineReader()
    result.key, value = reader.read(filename_queue)

    # One float default per column: the label plus
    # SIGNAL_SIZE * channels signal values.
    record_defaults = [[1.0] for _ in range(SIGNAL_SIZE * channels + 1)]

    columns = tf.decode_csv(value, record_defaults=record_defaults)

    # Columns 1 .. SIGNAL_SIZE * channels hold the signal values.
    result.signal = tf.cast(
        tf.strided_slice(columns, [1], [SIGNAL_SIZE * channels + 1]),
        tf.float32)
    result.signal = tf.reshape(result.signal, [SIGNAL_SIZE, channels])
    # Column 0 holds the label; subtract 1 so labels run 0..NUM_CLASSES-1,
    # matching the logits.
    result.label = tf.cast(
        tf.strided_slice(columns, [0], [1]) - 1, tf.float32)
    result.label = tf.reshape(result.label, [1, 1])

    return result
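A minimal driver sketch for the reader above, assuming TensorFlow 1.x is imported as tf, that SIGNAL_SIZE and channels are module-level constants (the values below are placeholders), and a hypothetical train.csv:

SIGNAL_SIZE = 128  # assumed value for illustration
channels = 1       # assumed value for illustration

filename_queue = tf.train.string_input_producer(['train.csv'])  # hypothetical file
example = read_cnnHAR(filename_queue)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    signal, label = sess.run([example.signal, example.label])  # one parsed row
    coord.request_stop()
    coord.join(threads)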
Example #2
def daoru(file_name):
    # Assumes module-level lists data, label, data_yz and label_yz.
    filename_queue = tf.train.string_input_producer([file_name])
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    record_defaults = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0],
                       [1.0], [1.0]]
    col1, col2, col3, col4, col5, col6, col7, col8, col9, col10 = tf.decode_csv(
        value, record_defaults=record_defaults)
    features = tf.stack([col1, col2, col3, col4, col5, col6, col7, col8, col9])

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # The first 9000 rows form the training set ...
        for i in range(9000):
            d, l = sess.run([features, col10])
            data.append(d)
            label.append(l)
        # ... and the next 1000 rows the validation set.
        for i in range(1000):
            d, l = sess.run([features, col10])
            data_yz.append(d)
            label_yz.append(l)

        coord.request_stop()
        coord.join(threads)
Example #3
def read_and_push_instance(filename_queue, instance_queue):
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)
    x1, x2, target = tf.decode_csv(value, record_defaults=[[-1.], [-1.], [-1]])
    features = tf.stack([x1, x2])
    enqueue_instance = instance_queue.enqueue([features, target])
    return enqueue_instance
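A sketch of how this helper might be wired up, assuming TensorFlow 1.x imported as tf and a hypothetical my_data.csv with two float features and an integer target per row:

filename_queue = tf.train.string_input_producer(['my_data.csv'])
instance_queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32, tf.int32],
                              shapes=[[2], []])
enqueue_instance = read_and_push_instance(filename_queue, instance_queue)

# Run the enqueue op on two background threads via a QueueRunner.
queue_runner = tf.train.QueueRunner(instance_queue, [enqueue_instance] * 2)
tf.train.add_queue_runner(queue_runner)
minibatch_features, minibatch_targets = instance_queue.dequeue_many(5)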
Example #4
def map_fun(context):
    print(tf.__version__)
    sys.stdout.flush()
    tf.logging.set_verbosity(tf.logging.ERROR)
    jobName = context.jobName
    index = context.index
    clusterStr = context.properties["cluster"]
    delim = context.properties["SYS:delim"]
    epochs = int(context.properties["epochs"])
    data_file = context.properties["data.file"]
    print(index, clusterStr)
    sys.stdout.flush()
    clusterJson = json.loads(clusterStr)
    cluster = tf.train.ClusterSpec(cluster=clusterJson)
    server = tf.train.Server(cluster, job_name=jobName, task_index=index)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_filters=["/job:ps", "/job:worker/task:%d" % index])

    with tf.device(
            tf.train.replica_device_setter(worker_device='/job:worker/task:' +
                                           str(index),
                                           cluster=cluster)):
        filename_queue = tf.train.string_input_producer([data_file],
                                                        num_epochs=epochs)
        reader = tf.TextLineReader()
        key, value = reader.read(filename_queue)
        global_step = tf.train.get_or_create_global_step()
        global_step_inc = tf.assign_add(global_step, 1)
        is_chief = (index == 0)
        print(datetime.now().isoformat() +
              " started ------------------------------------")
        t = time.time()
        total_step = 0
        try:
            with tf.train.MonitoredTrainingSession(
                    master=server.target,
                    is_chief=is_chief,
                    config=sess_config,
                    checkpoint_dir="./target/tmp/input_output/" +
                    str(t)) as mon_sess:
                # Loop until the reader exhausts its epochs and raises
                # OutOfRangeError, which is handled below.
                while True:
                    total_step, _, _ = mon_sess.run(
                        [global_step_inc, key, value])
                    if total_step % 10000 == 0:
                        log_speed(total_step, t)
        except Exception:
            print('traceback.print_exc():')
            traceback.print_exc()
            sys.stdout.flush()
        finally:
            print(datetime.now().isoformat() +
                  " ended --------------------------------------")
            log_speed(total_step, t)
            SummaryWriterCache.clear()
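When string_input_producer is given num_epochs, it creates a local epoch-counter variable; MonitoredTrainingSession initializes it automatically. A bare-session sketch of driving the same key/value ops would need the local initializer explicitly:

with tf.Session() as sess:
    # num_epochs adds a local variable that must be initialized by hand here.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while True:
            sess.run([key, value])
    except tf.errors.OutOfRangeError:
        pass  # raised once all epochs have been consumed
    finally:
        coord.request_stop()
        coord.join(threads)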
Example #5
def read_data(file_queue):
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(file_queue)

    # Column 0 (int) is skipped, columns 1-7 (float) are the features,
    # and column 8 (int) is the label.
    defaults = [[0], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0]]
    csv_columns = tf.io.decode_csv(value, defaults)

    feature_columns = csv_columns[1:-1]
    label_column = csv_columns[-1]

    return tf.stack(feature_columns), label_column
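A usage sketch, assuming TensorFlow 1.x imported as tf and a hypothetical data.csv laid out as described above:

file_queue = tf.train.string_input_producer(['data.csv'])
features, label = read_data(file_queue)

# Group single rows into fixed-order batches of 32.
feature_batch, label_batch = tf.train.batch([features, label], batch_size=32)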
Example #6
    def que_and_batch_linear_regression():
        filename_queue = tf.train.string_input_producer(
            ['data-01-test-score.csv'], shuffle=False, name='filename_queue')

        reader = tf.TextLineReader()
        key, value = reader.read(filename_queue)

        record_defaults = [[0.], [0.], [0.], [0.]]
        xy = tf.decode_csv(value, record_defaults=record_defaults)

        train_x_batch, train_y_batch = \
            tf.train.batch([xy[0:-1], xy[-1:]], batch_size=10)

        X = tf.placeholder(tf.float32, shape=[None, 3])
        Y = tf.placeholder(tf.float32, shape=[None, 1])

        W = tf.Variable(tf.random_normal([3, 1]), name='weight')
        b = tf.Variable(tf.random_normal([1]), name='bias')

        hypothesis = tf.matmul(X, W) + b

        cost = tf.reduce_mean(tf.square(hypothesis - Y))

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
        train = optimizer.minimize(cost)

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in range(2001):
            x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
            cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                           feed_dict={
                                               X: x_batch,
                                               Y: y_batch
                                           })
            if step % 10 == 0:
                print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

        coord.request_stop()
        coord.join(threads)

        print("Your score will be ",
              sess.run(hypothesis, feed_dict={X: [[100, 70, 101]]}))

        print(
            "Other scores will be ",
            sess.run(hypothesis, feed_dict={X: [[60, 70, 110], [90, 100,
                                                                80]]}))
Example #7
    def load_train_batch(self):
        """Load a batch of training instances.
        """
        seed = random.randint(0, 2**31 - 1)
        # Load the list of training files into queues
        file_list = self.format_file_list(self.dataset_dir, 'train')
        image_paths_queue = tf.train.string_input_producer(
            file_list['image_file_list'], seed=seed, shuffle=True)
        cam_paths_queue = tf.train.string_input_producer(
            file_list['cam_file_list'], seed=seed, shuffle=True)
        self.steps_per_epoch = int(
            len(file_list['image_file_list']) // self.batch_size)

        # Load images
        img_reader = tf.WholeFileReader()
        _, image_contents = img_reader.read(image_paths_queue)
        image_seq = tf.image.decode_jpeg(image_contents)
        tgt_image, src_image_stack = \
            self.unpack_image_sequence(
                image_seq, self.img_height, self.img_width, self.num_source)

        # Load camera intrinsics
        cam_reader = tf.TextLineReader()
        _, raw_cam_contents = cam_reader.read(cam_paths_queue)
        rec_def = []
        for i in range(9):
            rec_def.append([1.])
        raw_cam_vec = tf.decode_csv(raw_cam_contents, record_defaults=rec_def)
        raw_cam_vec = tf.stack(raw_cam_vec)
        intrinsics = tf.reshape(raw_cam_vec, [3, 3])

        # Form training batches
        src_image_stack, tgt_image, intrinsics = \
                tf.train.batch([src_image_stack, tgt_image, intrinsics],
                               batch_size=self.batch_size)

        # Data augmentation
        image_all = tf.concat([tgt_image, src_image_stack], axis=3)
        image_all, intrinsics = self.data_augmentation(image_all, intrinsics,
                                                       self.img_height,
                                                       self.img_width)
        tgt_image = image_all[:, :, :, :3]
        src_image_stack = image_all[:, :, :, 3:]
        intrinsics = self.get_multi_scale_intrinsics(intrinsics,
                                                     self.num_scales)
        return tgt_image, src_image_stack, intrinsics
Example #8
  def extract_features_and_targets(self, filename_queue, batch_size):
    """Extracts features and targets from filename_queue."""
    reader = tf.TextLineReader()
    _, value = reader.read(filename_queue)
    feature_list = tf.decode_csv(value, record_defaults=self.RECORD_DEFAULTS)

    # Setting features dictionary.
    features = dict(zip(self.feature_names, feature_list))
    features = self._binarize_protected_features(features)
    features = tf.train.batch(features, batch_size)

    # Setting targets dictionary.
    targets = {}
    targets[self.target_column_name] = tf.reshape(
        tf.cast(
            tf.equal(
                features.pop(self.target_column_name),
                self.target_column_positive_value), tf.float32), [-1, 1])
    return features, targets
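A hedged usage sketch, assuming loader is an instance of the surrounding class with RECORD_DEFAULTS, feature_names and the target settings already configured, and a hypothetical train.csv:

filename_queue = tf.train.string_input_producer(['train.csv'])
features, targets = loader.extract_features_and_targets(
    filename_queue, batch_size=32)

Note that tf.train.batch accepts a dictionary of tensors and returns a dictionary of batched tensors, which is why the whole features dict can be batched before the target column is popped out.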
Example #9
  def read_data(self):
    """Provides images and camera intrinsics."""
    with tf.name_scope('data_loading'):
      with tf.name_scope('enqueue_paths'):
        seed = random.randint(0, 2**31 - 1)
        self.file_lists = self.compile_file_list(self.data_dir, self.input_file)
        image_paths_queue = tf.train.string_input_producer(
            self.file_lists['image_file_list'], seed=seed,
            shuffle=self.shuffle,
            num_epochs=(1 if not self.shuffle else None)
        )
        seg_paths_queue = tf.train.string_input_producer(
            self.file_lists['segment_file_list'], seed=seed,
            shuffle=self.shuffle,
            num_epochs=(1 if not self.shuffle else None))
        cam_paths_queue = tf.train.string_input_producer(
            self.file_lists['cam_file_list'], seed=seed,
            shuffle=self.shuffle,
            num_epochs=(1 if not self.shuffle else None))
        img_reader = tf.WholeFileReader()
        _, image_contents = img_reader.read(image_paths_queue)
        seg_reader = tf.WholeFileReader()
        _, seg_contents = seg_reader.read(seg_paths_queue)
        if self.file_extension == 'jpg':
          image_seq = tf.image.decode_jpeg(image_contents)
          seg_seq = tf.image.decode_jpeg(seg_contents, channels=3)
        elif self.file_extension == 'png':
          image_seq = tf.image.decode_png(image_contents, channels=3)
          seg_seq = tf.image.decode_png(seg_contents, channels=3)

      with tf.name_scope('load_intrinsics'):
        cam_reader = tf.TextLineReader()
        _, raw_cam_contents = cam_reader.read(cam_paths_queue)
        rec_def = []
        for _ in range(9):
          rec_def.append([1.0])
        raw_cam_vec = tf.decode_csv(raw_cam_contents, record_defaults=rec_def)
        raw_cam_vec = tf.stack(raw_cam_vec)
        intrinsics = tf.reshape(raw_cam_vec, [3, 3])

      with tf.name_scope('convert_image'):
        image_seq = self.preprocess_image(image_seq)  # Converts to float.

      if self.random_color:
        with tf.name_scope('image_augmentation'):
          image_seq = self.augment_image_colorspace(image_seq)

      image_stack = self.unpack_images(image_seq)
      seg_stack = self.unpack_images(seg_seq)

      if self.flipping_mode != FLIP_NONE:
        random_flipping = (self.flipping_mode == FLIP_RANDOM)
        with tf.name_scope('image_augmentation_flip'):
          image_stack, seg_stack, intrinsics = self.augment_images_flip(
              image_stack, seg_stack, intrinsics,
              randomized=random_flipping)

      if self.random_scale_crop:
        with tf.name_scope('image_augmentation_scale_crop'):
          image_stack, seg_stack, intrinsics = self.augment_images_scale_crop(
              image_stack, seg_stack, intrinsics, self.img_height,
              self.img_width)

      with tf.name_scope('multi_scale_intrinsics'):
        intrinsic_mat = self.get_multi_scale_intrinsics(intrinsics,
                                                        self.num_scales)
        intrinsic_mat.set_shape([self.num_scales, 3, 3])
        intrinsic_mat_inv = tf.matrix_inverse(intrinsic_mat)
        intrinsic_mat_inv.set_shape([self.num_scales, 3, 3])

      if self.imagenet_norm:
        im_mean = tf.tile(
            tf.constant(IMAGENET_MEAN), multiples=[self.seq_length])
        im_sd = tf.tile(
            tf.constant(IMAGENET_SD), multiples=[self.seq_length])
        image_stack_norm = (image_stack - im_mean) / im_sd
      else:
        image_stack_norm = image_stack

      with tf.name_scope('batching'):
        if self.shuffle:
          (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
           intrinsic_mat_inv) = tf.train.shuffle_batch(
               [image_stack, image_stack_norm, seg_stack, intrinsic_mat,
                intrinsic_mat_inv],
               batch_size=self.batch_size,
               num_threads=self.threads,
               capacity=self.queue_size + QUEUE_BUFFER * self.batch_size,
               min_after_dequeue=self.queue_size)
        else:
          (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
           intrinsic_mat_inv) = tf.train.batch(
               [image_stack, image_stack_norm, seg_stack, intrinsic_mat,
                intrinsic_mat_inv],
               batch_size=self.batch_size,
               num_threads=1,
               capacity=self.queue_size + QUEUE_BUFFER * self.batch_size)
    return (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
            intrinsic_mat_inv)
Example #10
    # Fragment: this snippet begins mid-loop. It assumes x1 is drawn the
    # same way as x2 inside the surrounding sampling loop, and that data
    # and label are module-level lists.
    x2 = np.random.uniform(0, 2)

    # Points inside the quarter circle x1**2 + x2**2 <= 1 get label 0,
    # the rest get label 1; both coordinates are jittered with Gaussian noise.
    if x1**2 + x2**2 <= 1:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(0)
    else:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(1)

# Reshape into an (N, 2) data array and an (N, 1) label array.
data = np.hstack(data).reshape(-1, 2)
label = np.hstack(label).reshape(-1, 1)

# Read the contents of the CSV files.
filename_queue1 = tf.train.string_input_producer(["f://spoofing1.csv"])

reader1 = tf.TextLineReader()
key1, value1 = reader1.read(filename_queue1)
filename_queue2 = tf.train.string_input_producer(["f://spoofing2.csv"])

# Note: this fragment only decodes value1 below; reader2/value2 are unused.
reader2 = tf.TextLineReader()
key2, value2 = reader2.read(filename_queue2)

record_defaults = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0],
                   [1.0], [1.0]]
col1, col2, col3, col4, col5, col6, col7, col8, col9, col10 = tf.decode_csv(
    value1, record_defaults=record_defaults)
features = tf.stack(
    [col1, col2, col3, col4, col5, col6, col7, col8, col9])

init_op = tf.global_variables_initializer()
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
filename_queue = tf.train.string_input_producer(
    ['data_for_linear_regression_csv_data_load/test.csv'],
    shuffle=False,
    name='filename_queue')
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

record_defaults = [[0.], [0.], [0.], [0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults)

sess = tf.Session()

# Start the queue runners once, then read six rows from the CSV file.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(6):
    print(sess.run([xy]))
coord.request_stop()
coord.join(threads)