Example #1
File: predict.py Project: 530824679/YOLOv1
def predict(test_dir, checkpoints):
    """
    本函数用于对测试
    :param test_dir:待测试的目录
    :param checkpoints:权重文件
    :return:
    """
    input = tf.placeholder(tf.float32, [
        None, model_params['image_size'], model_params['image_size'],
        model_params['channels']
    ],
                           name='input')

    # Build the network
    Model = network.Network(is_train=False)
    logits = Model._build_network(input)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoints)

        file_list = os.listdir(test_dir)
        for filename in file_list:
            file = os.path.join(test_dir, filename)

            image = cv2.imread(file)
            # OpenCV images are indexed (height, width, channels)
            image_height = np.shape(image)[0]
            image_width = np.shape(image)[1]
            image = cv2.resize(
                image,
                (model_params['image_size'], model_params['image_size']))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
            image = (image / 255.0) * 2.0 - 1.0

            batch_image = np.zeros([
                1, model_params['image_size'], model_params['image_size'],
                model_params['channels']
            ])
            batch_image[0, :, :, :] = image

            output = sess.run(logits, feed_dict={input: batch_image})
            result = post_processing(output)

            for i in range(len(result)):
                result[i][1] *= (1.0 * image_width /
                                 model_params['image_size'])
                result[i][2] *= (1.0 * image_height /
                                 model_params['image_size'])
                result[i][3] *= (1.0 * image_width /
                                 model_params['image_size'])
                result[i][4] *= (1.0 * image_height /
                                 model_params['image_size'])

            draw_results(file, result)
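
A minimal invocation sketch, assuming model_params and the imports (os, cv2, numpy, tensorflow) are defined at module level as in the source file; both paths below are hypothetical:

if __name__ == '__main__':
    # hypothetical paths; point these at your own images and checkpoint
    predict('./data/test_images', './checkpoints/model.ckpt-1000')
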
Example #2
def freeze_graph(checkpoints_path, output_graph):
    """
    :param checkpoints_path: ckpt文件路径
    :param output_graph: pb模型保存路径
    :return:
    """
    with tf.Graph().as_default():
        image = tf.placeholder(shape=[None, 608, 608, 3],
                               dtype=tf.float32,
                               name='inputs')

        # Specify the output node names; they must already exist in the model
        output_node_names = "reorg_layer/obj_probs,reorg_layer/class_probs,reorg_layer/bboxes_probs"

        # Rebuild the graph structure from the model code
        Model = network.Network(is_train=False)
        logits = Model.build_network(image)
        output = Model.reorg_layer(logits, model_params['anchors'])

        # Alternative: restore the structure from the .meta file
        #saver = tf.train.import_meta_graph(checkpoints_path + '.meta', clear_devices=True)

        # Get the default graph
        graph = tf.get_default_graph()

        # Serialize the current graph into a GraphDef
        input_graph_def = graph.as_graph_def()

        with tf.Session() as sess:
            saver = tf.train.Saver()
            # Restore the variable values into the graph
            saver.restore(sess, checkpoints_path)

            # Persist the model: fold the variable values into constants
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess,
                input_graph_def=input_graph_def,
                output_node_names=output_node_names.split(","))

            # Strip training-only nodes, keeping just the inference path
            output_graph_def = graph_util.remove_training_nodes(
                output_graph_def)

            # Save the frozen model
            with tf.gfile.GFile(output_graph, "wb") as f:

                # Serialize and write it out
                f.write(output_graph_def.SerializeToString())

            # Report how many op nodes the final graph contains
            print("%d ops in the final graph." % len(output_graph_def.node))
Example #3
def test_label(self):
    n1 = DummyNode1(['TEST1:123', 'OTHER:abc'], [])
    n2 = DummyNode2(['OTHER:abc'], [])
    n3 = DummyNode3(['TEST2:456', 'SOME:rtf'], [])
    n4 = DummyNode3(['TEST2:456', 'SOME:rtf'], [])
    graph = network.Network()
    graph.add_node(n1)
    graph.add_node(n2)
    graph.add_node(n3)
    graph.add_node(n4)
    graph.add_edge(edge.Edge(n3, n1, 'LINKS', {}))
    graph.add_edge(edge.Edge(n3, n2, 'LINKS', {}))
    graph.add_edge(edge.Edge(n1, n2, 'LINKS', {}))
    # n3 and n4 are identical, so only three distinct nodes are expected
    self.assertEqual(len(list(graph.get_nodes())), 3)
    self.assertEqual(len(set(graph.nodes.values())), 3)
    self.assertEqual(len(list(graph.get_edges_by_label('LINKS'))), 3)
    graph.save(self.temp_file_path)
    # round-trip: reload the saved graph and check the counts survive
    graph = network.Network()
    with io.open(self.temp_file_path, 'r', encoding='utf-8',
                 newline='') as f:
        g = json.loads(f.read())
        graph.load_from_dict(g)
    self.assertEqual(len(list(graph.get_nodes())), 3)
    self.assertEqual(len(list(graph.get_edges_by_label('LINKS'))), 3)
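
The test adds four nodes yet asserts three, so Network evidently de-duplicates equal nodes. A minimal sketch of that idea, assuming nodes are identified by a hashable label list (a hypothetical attribute, not the library's actual code):

class TinyGraph:
    # Keeps nodes in a dict so equal keys collapse into one entry.
    def __init__(self):
        self.nodes = {}

    def add_node(self, node):
        # hypothetical: identify a node by its label tuple
        self.nodes[tuple(node.labels)] = node
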
Example #4
def test(test_path):
    """
    本函数用于对测试
    :param test_path:待测试的目录
    :return:
    """
    input = tf.placeholder(tf.float32, [
        None, model_params['input_height'], model_params['input_width'],
        model_params['channels']
    ],
                           name='input')

    # Build the network
    Model = network.Network(is_train=False)
    logits = Model.build_network(input)
    output = Model.reorg_layer(logits, model_params['anchors'])

    data = Dataset()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "./checkpoints/model.ckpt-181")

        file_list = os.listdir(test_path)
        for filename in file_list:
            file = os.path.join(test_path, filename)

            pts = data.load_pcd(file)
            roi_pts = data.filter_roi(pts)
            bev_image = data.transform_bev_image(roi_pts)
            bev_data = bev_image[np.newaxis, ...]

            bboxes, obj_probs, class_probs = sess.run(
                output, feed_dict={input: bev_data})
            bboxes, scores, class_max_index = postprocess(
                bboxes, obj_probs, class_probs)
            # results = postprocess(bboxes, obj_probs, class_probs)
            # for result in results:
            #     angle = data.calc_angle(result[5], result[4])
            #     draw_rotated_box(bev_image, int(result[0]), int(result[1]), int(result[2]), int(result[3]), angle, result[6], int(result[7]))

            for bbox, score, class_index in zip(bboxes, scores,
                                                class_max_index):
                angle = data.calc_angle(bbox[5], bbox[4])
                draw_rotated_box(bev_image, int(bbox[0]), int(bbox[1]),
                                 int(bbox[2]), int(bbox[3]), angle, score,
                                 class_index)
            cv2.imshow("image", bev_image)
            cv2.waitKey(0)
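
A minimal invocation sketch; the directory is hypothetical and should hold the .pcd files to visualize:

if __name__ == '__main__':
    test('./data/test_pcd')  # hypothetical directory of point clouds
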
Example #5
from model import Layers, load_data, network

# hyper-parameters
learning_rate = 0.01
num_iter = 10000
batch_size = 128
lr_decay = None

# network
net = network.Network(learning_rate=learning_rate,
                      num_iter=num_iter,
                      batch_size=batch_size,
                      lr_decay=lr_decay)

# layers
net.add_layer(Layers.Linear(n_in=32 * 32 * 3, n_out=1024))
net.add_layer(Layers.BatchNorm1d(n_in=1024))
net.add_layer(Layers.ReLU())
net.add_layer(Layers.Dropout(keep_prob=0.5))
net.add_layer(Layers.Linear(n_in=1024, n_out=1024))
net.add_layer(Layers.BatchNorm1d(n_in=1024))
net.add_layer(Layers.ReLU())
net.add_layer(Layers.Dropout(keep_prob=0.5))
net.add_layer(Layers.Linear(n_in=1024, n_out=10))
net.add_layer(Layers.CrossEntropyLoss())

# data
train, val, test = load_data.load_cifar10()
net.load_data(train, val, test)

# training
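The snippet is cut off at the training step; a plausible completion, assuming the Network class exposes a train() method (a hypothetical name, not confirmed by this excerpt):

net.train()  # hypothetical: run num_iter iterations over the loaded batches
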
Example #6
def train():
    start_step = 0
    restore = solver_params['restore']
    checkpoint_dir = path_params['checkpoints_dir']
    checkpoints_name = path_params['checkpoints_name']
    tfrecord_dir = path_params['tfrecord_dir']
    tfrecord_name = path_params['train_tfrecord_name']
    log_dir = path_params['logs_dir']
    batch_size = solver_params['batch_size']
    dataset_path = path_params['train_data_path']

    # Configure the GPU
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)

    # Parse the training samples and labels
    with open(dataset_path, 'r') as f:
        data_num = len(f.readlines())
    batch_num = int(math.ceil(float(data_num) / batch_size))
    data = tfrecord.TFRecord()
    dataset = data.create_dataset(dataset_path,
                                  batch_num,
                                  batch_size=batch_size,
                                  is_shuffle=True)
    iterator = dataset.make_one_shot_iterator()
    images, y_true = iterator.get_next()

    images.set_shape([None, 608, 608, 3])
    y_true.set_shape([None, 19, 19, 5, 7 + model_params['num_classes']])

    # Build the network
    Model = network.Network(is_train=True)
    logits = Model.build_network(images)

    # Compute the loss terms
    total_loss, diou_loss, angle_loss, confs_loss, class_loss = Model.calc_loss(
        logits, y_true)

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(solver_params['lr'],
                                               global_step,
                                               solver_params['decay_steps'],
                                               solver_params['decay_rate'],
                                               staircase=True)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(total_loss, global_step=global_step)

    # TensorBoard scalar summaries
    tf.summary.scalar("learning_rate", learning_rate)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar("diou_loss", diou_loss)
    tf.summary.scalar("angle_loss", angle_loss)
    tf.summary.scalar("confs_loss", confs_loss)
    tf.summary.scalar("class_loss", class_loss)

    # Merge the summaries and create the writer
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir,
                                           graph=tf.get_default_graph(),
                                           flush_secs=60)

    # Checkpoint saving
    save_variable = tf.global_variables()
    saver = tf.train.Saver(save_variable, max_to_keep=50)
    with tf.Session(config=config) as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])

        if restore:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                stem = os.path.basename(ckpt.model_checkpoint_path)
                restore_step = int(stem.split('.')[1].split('-')[-1])
                start_step = restore_step
                sess.run(global_step.assign(restore_step))
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Restoring from {}'.format(ckpt.model_checkpoint_path))
            else:
                print("Failed to find a checkpoint")

        summary_writer.add_graph(sess.graph)

        import time
        print('\n----------- start to train -----------\n')
        for epoch in range(start_step + 1, solver_params['epoches']):
            train_epoch_loss, train_epoch_diou_loss, train_epoch_angle_loss, train_epoch_confs_loss, train_epoch_class_loss = [], [], [], [], []
            for index in tqdm(range(batch_num)):
                start = time.time()
                _, summary_, loss_, diou_loss_, angle_loss_, confs_loss_, class_loss_, global_step_, lr = sess.run(
                    [
                        train_op, summary_op, total_loss, diou_loss,
                        angle_loss, confs_loss, class_loss, global_step,
                        learning_rate
                    ])

                print(
                    "Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, diou_loss: {:.3f}, angle_loss: {:.3f},confs_loss: {:.3f}, class_loss: {:.3f}"
                    .format(epoch, global_step_, lr, loss_, diou_loss_,
                            angle_loss_, confs_loss_, class_loss_))
                print("train time:", time.time() - start)
                train_epoch_loss.append(loss_)
                train_epoch_diou_loss.append(diou_loss_)
                train_epoch_angle_loss.append(angle_loss_)
                train_epoch_confs_loss.append(confs_loss_)
                train_epoch_class_loss.append(class_loss_)

                summary_writer.add_summary(summary_, global_step_)

            train_epoch_loss = np.mean(train_epoch_loss)
            train_epoch_diou_loss = np.mean(train_epoch_diou_loss)
            train_epoch_angle_loss = np.mean(train_epoch_angle_loss)
            train_epoch_confs_loss = np.mean(train_epoch_confs_loss)
            train_epoch_class_loss = np.mean(train_epoch_class_loss)
            print(
                "Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, diou_loss: {:.3f}, angle_loss: {:.3f},confs_loss: {:.3f}, class_loss: {:.3f}"
                .format(epoch, global_step_, lr, train_epoch_loss,
                        train_epoch_diou_loss, train_epoch_angle_loss,
                        train_epoch_confs_loss, train_epoch_class_loss))
            saver.save(sess,
                       os.path.join(checkpoint_dir, checkpoints_name),
                       global_step=epoch)

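For reference, tf.train.exponential_decay with staircase=True computes lr0 * decay_rate ** floor(step / decay_steps). A standalone sketch with made-up hyperparameters:

def staircase_lr(lr0, step, decay_steps, decay_rate):
    # mirrors tf.train.exponential_decay(..., staircase=True)
    return lr0 * decay_rate ** (step // decay_steps)

print(staircase_lr(1e-3, 25000, 10000, 0.9))  # 1e-3 * 0.9**2 = 0.00081
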
Example #7
File: train.py Project: 530824679/YOLOv3
def train():
    start_step = 0
    restore = solver_params['restore']
    pre_train = solver_params['pre_train']
    checkpoint_dir = path_params['checkpoints_dir']
    checkpoints_name = path_params['checkpoints_name']
    tfrecord_dir = path_params['tfrecord_dir']
    tfrecord_name = path_params['train_tfrecord_name']
    log_dir = path_params['logs_dir']
    batch_size = solver_params['batch_size']
    num_class = len(model_params['classes'])

    # Configure the GPU
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)

    # Parse the training samples and labels
    data = tfrecord.TFRecord()
    train_tfrecord = os.path.join(tfrecord_dir, tfrecord_name)
    data_num = total_sample(train_tfrecord)
    batch_num = int(math.ceil(float(data_num) / batch_size))
    dataset = data.create_dataset(train_tfrecord, batch_num, batch_size=batch_size, is_shuffle=True)
    iterator = dataset.make_one_shot_iterator()
    inputs, y_true_13, y_true_26, y_true_52 = iterator.get_next()

    inputs.set_shape([None, 416, 416, 3])
    y_true_13.set_shape([None, 13, 13, 3, 5+num_class])
    y_true_26.set_shape([None, 26, 26, 3, 5+num_class])
    y_true_52.set_shape([None, 52, 52, 3, 5+num_class])

    y_true = [y_true_13, y_true_26, y_true_52]

    # Build the network
    with tf.variable_scope('yolov3'):
        model = network.Network(len(model_params['classes']), model_params['anchors'], is_train=True)
        logits = model.build_network(inputs)

    # Compute the loss terms
    loss = model.calc_loss(logits, y_true)
    l2_loss = tf.losses.get_regularization_loss()

    # restore_include = None
    # restore_exclude = ['yolov3/yolov3_head/Conv_14', 'yolov3/yolov3_head/Conv_6', 'yolov3/yolov3_head/Conv_22']
    # update_part = ['yolov3/yolov3_head']
    # saver_to_restore = tf.train.Saver(var_list=tf.contrib.framework.get_variables_to_restore(include=restore_include, exclude=restore_exclude))
    # update_vars = tf.contrib.framework.get_variables_to_restore(include=update_part)

    global_step = tf.Variable(float(0), trainable=False)#, collections=[tf.GraphKeys.LOCAL_VARIABLES])
    learning_rate = tf.train.exponential_decay(solver_params['lr'], global_step, solver_params['decay_steps'], solver_params['decay_rate'], staircase=True)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss[0] + l2_loss, var_list=None, global_step=global_step)
        #gvs = optimizer.compute_gradients(loss[0] + l2_loss, var_list=update_vars)
        #clip_grad_var = [gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]] for gv in gvs]
        #train_op = optimizer.apply_gradients(clip_grad_var, global_step=global_step)

    tf.summary.scalar("learning_rate", learning_rate)
    tf.summary.scalar('total_loss', loss[0])
    tf.summary.scalar('loss_xy', loss[1])
    tf.summary.scalar('loss_wh', loss[2])
    tf.summary.scalar('loss_conf', loss[3])
    tf.summary.scalar('loss_class', loss[4])

    # Configure TensorBoard
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir, graph=tf.get_default_graph(), flush_secs=60)

    save_variable = tf.global_variables()
    saver_to_restore = tf.train.Saver(save_variable, max_to_keep=50)
    with tf.Session(config=config) as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

        if restore:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                stem = os.path.basename(ckpt.model_checkpoint_path)
                restore_step = int(stem.split('.')[0].split('-')[-1])
                start_step = restore_step
                sess.run(global_step.assign(restore_step))
                saver_to_restore.restore(sess, ckpt.model_checkpoint_path)
                print('Restoring from {}'.format(ckpt.model_checkpoint_path))
            else:
                print("Failed to find a checkpoint")

        if pre_train:
            saver_to_restore.restore(sess, os.path.join(path_params['weights_dir'], 'yolov3.ckpt'))

        summary_writer.add_graph(sess.graph)

        for epoch in range(start_step + 1, solver_params['total_epoches']):
            train_epoch_loss, train_epoch_xy_loss, train_epoch_wh_loss, train_epoch_confs_loss, train_epoch_class_loss = [], [], [], [], []
            for index in tqdm(range(batch_num)):
                _, summary_, loss_, xy_loss_, wh_loss_, confs_loss_, class_loss_, global_step_, lr = sess.run([train_op, summary_op, loss[0], loss[1], loss[2], loss[3], loss[4], global_step, learning_rate])
                print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, xy_loss: {:.3f}, wh_loss: {:.3f}, confs_loss: {:.3f}, class_loss: {:.3f}".format(
                        epoch, global_step_, lr, loss_, xy_loss_, wh_loss_, confs_loss_, class_loss_))

                train_epoch_loss.append(loss_)
                train_epoch_xy_loss.append(xy_loss_)
                train_epoch_wh_loss.append(wh_loss_)
                train_epoch_confs_loss.append(confs_loss_)
                train_epoch_class_loss.append(class_loss_)

                summary_writer.add_summary(summary_, global_step_)

            train_epoch_loss = np.mean(train_epoch_loss)
            train_epoch_xy_loss = np.mean(train_epoch_xy_loss)
            train_epoch_wh_loss = np.mean(train_epoch_wh_loss)
            train_epoch_confs_loss = np.mean(train_epoch_confs_loss)
            train_epoch_class_loss = np.mean(train_epoch_class_loss)
            print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, xy_loss: {:.3f}, wh_loss: {:.3f},confs_loss: {:.3f}, class_loss: {:.3f}".format(epoch, global_step_, lr, train_epoch_loss, train_epoch_xy_loss, train_epoch_wh_loss, train_epoch_confs_loss, train_epoch_class_loss))
            saver_to_restore.save(sess, os.path.join(checkpoint_dir, checkpoints_name), global_step=epoch)

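The commented-out lines sketch a gradient-clipping alternative to minimize(). Spelled out against the same graph, the disabled path would look like this (a sketch, not the code that runs):

gvs = optimizer.compute_gradients(loss[0] + l2_loss)
clipped = [(g, v) if g is None else (tf.clip_by_norm(g, 100.0), v)
           for g, v in gvs]
train_op = optimizer.apply_gradients(clipped, global_step=global_step)
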
Example #8
File: train.py Project: 530824679/YOLOv1
def train():
    start_step = 0
    log_step = solver_params['log_step']
    display_step = solver_params['display_step']
    restore = solver_params['restore']
    checkpoint_dir = path_params['checkpoints_dir']
    checkpoints_name = path_params['checkpoints_name']
    tfrecord_dir = path_params['tfrecord_dir']
    tfrecord_name = path_params['train_tfrecord_name']
    log_dir = path_params['logs_dir']
    weights_file = path_params['weights_file']

    # Configure the GPU
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)

    # Parse the training samples and labels
    data = tfrecord.TFRecord()
    train_tfrecord = os.path.join(tfrecord_dir, tfrecord_name)
    image_batch, label_batch = data.parse_batch_examples(train_tfrecord)

    # Define the input placeholders
    inputs = tf.placeholder(dtype=tf.float32,
                            shape=[
                                None, model_params['image_size'],
                                model_params['image_size'],
                                model_params['channels']
                            ],
                            name='inputs')
    outputs = tf.placeholder(dtype=tf.float32,
                             shape=[
                                 None, model_params['cell_size'],
                                 model_params['cell_size'],
                                 5 + model_params['num_classes']
                             ],
                             name='outputs')

    # Build the network
    Model = network.Network(is_train=True)
    logits = Model._build_network(inputs)

    # Compute the loss
    Losses = loss_utils.Loss(logits, outputs, 'loss')
    loss_op = tf.losses.get_total_loss()

    vars = tf.trainable_variables()
    l2_reg_loss_op = tf.add_n([tf.nn.l2_loss(var) for var in vars
                               ]) * solver_params['weight_decay']
    total_loss = loss_op + l2_reg_loss_op
    tf.summary.scalar('total_loss', total_loss)

    # Create the global step
    global_step = tf.train.create_global_step()
    # Set up a decaying learning rate
    learning_rate = tf.train.exponential_decay(solver_params['learning_rate'],
                                               global_step,
                                               solver_params['decay_steps'],
                                               solver_params['decay_rate'],
                                               solver_params['staircase'],
                                               name='learning_rate')

    # Set up the optimizer
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # Optimize with stochastic gradient descent
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=learning_rate)
        train_op = slim.learning.create_train_op(total_loss, optimizer,
                                                 global_step)

    # Checkpoint saving
    save_variable = tf.global_variables()
    saver = tf.train.Saver(save_variable, max_to_keep=1000)

    # Configure TensorBoard
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir,
                                           graph=tf.get_default_graph(),
                                           flush_secs=60)

    with tf.Session(config=config) as sess:
        init_var_op = tf.global_variables_initializer()
        sess.run(init_var_op)

        if weights_file is not None:
            print('Restoring weights from: ' + weights_file)
            saver.restore(sess, weights_file)

        if restore:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                stem = os.path.basename(ckpt.model_checkpoint_path)
                restore_step = int(stem.split('.')[0].split('-')[-1])
                start_step = restore_step
                sess.run(global_step.assign(restore_step))
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Restoring from {}'.format(ckpt.model_checkpoint_path))
            else:
                print("Failed to find a checkpoint")

        coordinate = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coordinate, sess=sess)
        summary_writer.add_graph(sess.graph)

        for epoch in range(start_step + 1, solver_params['max_iter']):
            start_time = time.time()

            if coordinate.should_stop():
                break
            image, label = sess.run([image_batch, label_batch])
            feed_dict = {inputs: image, outputs: label}
            _, loss, current_global_step = sess.run(
                [train_op, total_loss, global_step], feed_dict=feed_dict)

            end_time = time.time()

            if epoch % solver_params['save_step'] == 0:
                save_path = saver.save(sess,
                                       os.path.join(checkpoint_dir,
                                                    checkpoints_name),
                                       global_step=epoch)
                print('Saved model to {}'.format(save_path))

            if epoch % log_step == 0:
                summary = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary, global_step=epoch)

            if epoch % display_step == 0:
                per_iter_time = end_time - start_time
                print("step:{:.0f}  total_loss:  {:.5f} {:.2f} s/iter".format(
                    epoch, loss, per_iter_time))

        coordinate.request_stop()
        coordinate.join(threads)
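One detail in the regularization above: tf.nn.l2_loss(v) returns sum(v ** 2) / 2, so the penalty is weight_decay times half the squared norm of each variable. A quick numeric check of that convention (values are made up):

import numpy as np

w = np.array([3.0, 4.0])
l2 = np.sum(w ** 2) / 2.0  # matches tf.nn.l2_loss: (9 + 16) / 2 = 12.5
penalty = 5e-4 * l2        # with a hypothetical weight_decay of 5e-4
print(penalty)             # 0.00625
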
Example #9
File: test.py Project: synek/NeuralNotWork
import random

with open('/home/rory/projects/neuralnotwork/data/winequality-red.csv') as f:
    raw = f.read().split('\n')

training_total = [r.split(';') for r in raw][1:]
training_data = [sample[:-1] for sample in training_total]
training_labels = [sample[-1] for sample in training_total]

training_data = [[float(i) for i in j] for j in training_data]
# Drop samples with an empty label; filter a zipped copy rather than
# popping from the lists while iterating (which skips elements).
pairs = [(d, l) for d, l in zip(training_data, training_labels) if l != '']
training_data = [d for d, _ in pairs]
training_labels = [int(l) for _, l in pairs]


def small_num():
    return random.uniform(0, 1)


# Jitter the integer quality scores into continuous regression targets
training_labels = [n + small_num() for n in training_labels]

n_sig = network.Network(training_data, training_labels, None, None)

n_sig.addLayer(11, 'sigmoid')
n_sig.addLayer(15, 'sigmoid')
n_sig.addLayer(4, 'sigmoid')
n_sig.addLayer(1, 'linear', is_output=True)

n_sig.train()
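
The manual split(';') parsing above can also be written with the standard csv module, which handles the header and the trailing blank line in one pass; a sketch over the same file layout:

import csv

with open('winequality-red.csv', newline='') as f:  # same layout as above
    reader = csv.reader(f, delimiter=';')
    next(reader)  # skip the header row
    rows = [row for row in reader if row and row[-1] != '']

training_data = [[float(v) for v in row[:-1]] for row in rows]
training_labels = [int(row[-1]) for row in rows]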