Example #1
import numpy as np
import pandas as pd
import tensorflow as tf

csv_name = "tomoto_and_unclass.csv"
csv = pd.read_csv(csv_name, header=None)
#test_csv_name = 'tomato_test_only_tomato.csv'
test_csv_name = 'tomato_test_only_tomato_original.csv'
test_csv = pd.read_csv(test_csv_name, header=None)
#path col=0
#label col=1
sample_size = csv.shape[0]
n_class = len(np.unique(csv[1]))
# `a` is presumably the script's argparse namespace (providing batch_size and epoch)
iteration_num = int(sample_size / a.batch_size * a.epoch)
seed = 1141919

#--------------ImageLoad-----------------#
with tf.name_scope('LoadImage'):
    filename_queue = tf.train.string_input_producer([csv_name], shuffle=True)
    reader = tf.TextLineReader()
    _, val = reader.read(filename_queue)
    record_defaults = [["a"], [0], ["a"]]
    _, label, path = tf.decode_csv(val, record_defaults=record_defaults)
    readfile = tf.read_file(path)
    image = tf.image.decode_jpeg(readfile, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.resize_images(image, (256, 256))

    def noisy(img):
        img_f = tf.reshape(img, [-1])
        # Mask (near-)black pixels and fill them with uniform noise.
        a_m = tf.where(img_f > 1 / 255.0, tf.zeros_like(img_f),
                       tf.ones_like(img_f))
        a_m = tf.reshape(a_m, img.shape)
        # The original snippet is cut off mid-call here; a plausible completion:
        noise = tf.random_uniform(a_m.shape, minval=0.0, maxval=1.0)
        return img + a_m * noise
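
    # --- Not part of the original snippet: a minimal sketch of how the
    # decoded image might be noised and grouped into batches; `a.batch_size`
    # is assumed to come from the script's argparse flags. ---
    image = noisy(image)
    image_batch, label_batch = tf.train.shuffle_batch(
        [image, label], batch_size=a.batch_size,
        capacity=1000, min_after_dequeue=100)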
Example #2

import tensorflow as tf

filename_queue = tf.train.string_input_producer(['data-01-test-score.csv'],
                                                shuffle=False,
                                                name='filename_queue')

reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# default values to use when a field is empty
record_defaults = [[0.], [0.], [0.], [0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults)

# collect batches of csv rows
train_x_batch, train_y_batch = tf.train.batch([xy[0:-1], xy[-1:]],
                                              batch_size=10)
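
# Each sess.run on train_x_batch / train_y_batch dequeues arrays of shape
# [10, 3] (features) and [10, 1] (labels) from the queue.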

# placeholders
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# hypothesis: linear model XW + b
hypothesis = tf.matmul(X, W) + b

# Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Minimize: gradient descent on the cost (the snippet is cut off here in the
# original; this is a standard continuation for this example)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
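
A minimal sketch (assumed; not part of the original snippet) of the training loop that drives this queue-based pipeline: the queue runners must be started before the batch tensors can be evaluated.

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Start the filename-queue and batching threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

for step in range(2001):
    x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
    cost_val, _ = sess.run([cost, train], feed_dict={X: x_batch, Y: y_batch})
    if step % 100 == 0:
        print(step, "Cost:", cost_val)

coord.request_stop()
coord.join(threads)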
Example #3
    # The snippet begins mid-statement; the opening below reconstructs the
    # standard tf.train.replica_device_setter pattern these arguments belong to.
    with tf.device(
            tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % FLAGS.task_index,
                cluster=cluster)):
        # More to come on is_chief...
        is_chief = FLAGS.task_index == 0
        # count the number of global steps
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # input data
        trainset_files = [
            ("/home/andy_shen/code/url_svmlight/Day%d" % i) + ".svm"
            for i in range(121)
        ]
        train_filename_queue = tf.train.string_input_producer(trainset_files)
        train_reader = tf.TextLineReader()
        train_data_line = train_reader.read(train_filename_queue)
        with tf.name_scope('placeholder'):
            y = tf.placeholder(tf.float32, [None, 1])
            sp_indices = tf.placeholder(tf.int64)
            shape = tf.placeholder(tf.int64)
            ids_val = tf.placeholder(tf.int64)
            weights_val = tf.placeholder(tf.float32)
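
        # (Explanatory note, not original code.) These placeholders are
        # typically fed from parsed SVMLight lines, e.g. "1 5:0.3 12:0.7"
        # -> label 1, indices [5, 12], values [0.3, 0.7]; sp_indices and
        # shape describe the SparseTensor assembled in the next scope.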

        with tf.name_scope('parameter'):
            #x_data = tf.sparse_to_dense(sp_indices, shape, weights_val, 0., name=None)
            x_data = tf.SparseTensor(sp_indices, weights_val, shape)
            # weight = tf.Variable(tf.constant(0.0, shape=[num_features, 1]))
            # b = tf.Variable(tf.constant(0.1, shape=[1]))

        loss = SVMModel_with_linear(x_data, y, num_features)
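
SVMModel_with_linear is not defined in the snippet; a plausible linear-SVM hinge-loss implementation over the SparseTensor input, matching the commented-out weight/b hints above, might look like this (the l2_reg coefficient is an assumption):

def SVMModel_with_linear(x_data, y, num_features, l2_reg=0.01):
    """Linear SVM: hinge loss plus L2 penalty, for labels y in {-1, +1}."""
    weight = tf.Variable(tf.constant(0.0, shape=[num_features, 1]))
    b = tf.Variable(tf.constant(0.1, shape=[1]))
    # Raw decision scores for the sparse feature batch.
    scores = tf.sparse_tensor_dense_matmul(x_data, weight) + b
    hinge = tf.reduce_mean(tf.maximum(0.0, 1.0 - y * scores))
    return hinge + l2_reg * tf.nn.l2_loss(weight)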