Code Example #1
    def remove_background(source_path: str, output_path: str):
        """
        Remove the background of every image under the source directory tree
        :param source_path: source directory path
        :param output_path: output directory path
        :return:
        """
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        directories = os.listdir(source_path)
        for directory in directories:
            out_directory_path = os.path.join(output_path, directory)
            if not os.path.exists(out_directory_path):
                os.makedirs(out_directory_path)
            for file in os.listdir(os.path.join(source_path, directory)):
                # Read the image, convert BGR -> RGB and remove the background
                image = cv2.imread(os.path.join(source_path, directory, file))
                image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                removed = remove_background.remove_background(image_rgb)
                last_index = file.rfind(".")
                origin_name = file[0:last_index]
                output_file_name = os.path.join(
                    out_directory_path,
                    str(origin_name) + "_background_removed" + ".png")
                cv2.imwrite(output_file_name, removed)
                print("Saved: ", output_file_name)
Code Example #2
def test(model_path: str = None):
    data = SeedlingsData()
    data.load(train_data_paths=[],
              test_data_paths=[constants.test_output_resize_file_path])

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Load network
    net = load_model(model_path)
    net.cuda()

    create_submission_file("submission.txt")

    with open("submission.txt", "a") as submission_file:
        for test_image_dir, test_image in data.generate_test_data():
            test_tensor = normalize(torch.from_numpy(test_image))
            test_x = Variable(test_tensor, volatile=True).cuda().float()

            if net.model_name == 'resnet50+':
                prob, mask, _ = remove_background(test_image)
                plant_area = np.sum(mask, (1, 2))
                avg_prob = np.divide(np.sum(prob * mask, (1, 2)),
                                     plant_area,
                                     out=np.zeros_like(plant_area).astype(
                                         np.float),
                                     where=plant_area != 0)
                avg_green = np.divide(
                    np.sum(test_image[:, 1, :, :] * mask, (1, 2)),
                    plant_area,
                    out=np.zeros_like(plant_area).astype(np.float),
                    where=plant_area != 0)
                plant_area = np.reshape(plant_area, (data.batch_size, 1))
                plant_area = Variable(
                    torch.from_numpy(plant_area)).cuda().float()

                avg_prob = np.reshape(avg_prob, (data.batch_size, 1))
                avg_prob = Variable(torch.from_numpy(avg_prob)).cuda().float()

                avg_green = np.reshape(avg_green, (data.batch_size, 1))
                avg_green = Variable(
                    torch.from_numpy(avg_green)).cuda().float()

                test_output = net(test_x, plant_area, avg_prob, avg_green)
            else:
                test_output = net(test_x)

            _, predict_y = torch.max(test_output, 1)
            print("Predict result:")
            print(predict_y)
            file_name = os.path.split(test_image_dir)[1].split("_")[0] + '.' + \
                        os.path.split(test_image_dir)[1].split('.')[1]
            string_to_write = "{},{}\r\n".format(
                file_name, SeedlingsData.seedlings_labels[predict_y.data[0]])
            submission_file.write(string_to_write)
            print(string_to_write)

    del net
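test() loads a trained checkpoint and appends one prediction line per test image to submission.txt. An illustrative call, where the checkpoint path is purely a placeholder:

test(model_path="./checkpoints/resnet50_plus_best.pth")  # hypothetical path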
Code Example #3
def train_epoch(net: Net, data: SeedlingsData, epoch: int,
                normalize: transforms.Normalize, optimizer: Optimizer):
    losses = []
    train_total = 0
    train_right = 0
    for batch_index, images, labels in data.generate_train_data():
        tensor = normalize(torch.from_numpy(images))
        batch_x = Variable(tensor).cuda().float()
        batch_y = Variable(torch.from_numpy(labels)).cuda().long()

        if net.model_name == 'resnet50+':
            prob, mask, _ = remove_background(images)
            plant_area = np.sum(mask, (1, 2))
            avg_prob = np.divide(np.sum(prob * mask, (1, 2)),
                                 plant_area,
                                 out=np.zeros_like(plant_area).astype(
                                     np.float),
                                 where=plant_area != 0)
            avg_green = np.divide(np.sum(images[:, 1, :, :] * mask, (1, 2)),
                                  plant_area,
                                  out=np.zeros_like(plant_area).astype(
                                      np.float),
                                  where=plant_area != 0)
            plant_area = np.reshape(plant_area, (data.batch_size, 1))
            plant_area = Variable(torch.from_numpy(plant_area)).cuda().float()

            avg_prob = np.reshape(avg_prob, (data.batch_size, 1))
            avg_prob = Variable(torch.from_numpy(avg_prob)).cuda().float()

            avg_green = np.reshape(avg_green, (data.batch_size, 1))
            avg_green = Variable(torch.from_numpy(avg_green)).cuda().float()

            output = net(batch_x, plant_area, avg_prob, avg_green)
        else:
            output = net(batch_x)

        _, predict_batch_y = torch.max(output, 1)

        optimizer.zero_grad()
        criterion = nn.CrossEntropyLoss()
        loss = criterion(output, batch_y)
        loss.backward()
        optimizer.step()
        losses.append(loss.data[0])
        train_total += batch_y.size(0)
        train_right += sum(
            predict_batch_y.data.cpu().numpy() == batch_y.data.cpu().numpy())
        accuracy = train_right / train_total
        print("epoch:{}, batch index:{}, accuracy:{}, loss:{}".format(
            epoch, batch_index, accuracy, loss.data[0]))
        # Validate
        if batch_index != 0 and batch_index % 100 == 0:
            pass
    accuracy = train_right / train_total
    print("epoch:{}, , average accuracy:{}, average train loss:{}".format(
        epoch, accuracy,
        sum(losses) / len(losses)))
Code Example #4
def validate_analysis(net: Net, data: SeedlingsData,
                      normalize: transforms.Normalize):
    truth_pred = []
    previous_size = data.batch_size
    data.set_batch_size(1)
    for (validate_batch_index, validate_images,
         validate_labels) in data.generate_validate_data():
        validate_tensor = normalize(torch.from_numpy(validate_images))
        validate_batch_x = Variable(validate_tensor,
                                    volatile=True).cuda().float()
        validate_batch_y = Variable(torch.from_numpy(validate_labels),
                                    volatile=True).cuda().long()

        if net.model_name == 'resnet50+':
            prob, mask, _ = remove_background(validate_images)
            plant_area = np.sum(mask, (1, 2))
            avg_prob = np.divide(np.sum(prob * mask, (1, 2)),
                                 plant_area,
                                 out=np.zeros_like(plant_area).astype(
                                     np.float),
                                 where=plant_area != 0)
            avg_green = np.divide(
                np.sum(validate_images[:, 1, :, :] * mask, (1, 2)),
                plant_area,
                out=np.zeros_like(plant_area).astype(np.float),
                where=plant_area != 0)
            plant_area = np.reshape(plant_area, (data.batch_size, 1))
            plant_area = Variable(torch.from_numpy(plant_area)).cuda().float()

            avg_prob = np.reshape(avg_prob, (data.batch_size, 1))
            avg_prob = Variable(torch.from_numpy(avg_prob)).cuda().float()

            avg_green = np.reshape(avg_green, (data.batch_size, 1))
            avg_green = Variable(torch.from_numpy(avg_green)).cuda().float()

            validate_output = net(validate_batch_x, plant_area, avg_prob,
                                  avg_green)
        else:
            validate_output = net(validate_batch_x)

        _, predict_batch_y = torch.max(validate_output, 1)
        truth_pred.append([validate_batch_y.data[0], predict_batch_y.data[0]])
    truth_pred = np.array(truth_pred)
    for i in range(0, 12):
        species_pred = truth_pred[truth_pred[:, 0] == i]
        acc = []
        for j in range(0, 12):
            acc.append(np.sum(species_pred[:, 1] == j) / species_pred.shape[0])
        print("Species:{}, accuracy: {}".format(
            SeedlingsData.seedlings_labels[i], acc))
    data.set_batch_size(previous_size)
Code Example #5
def validate_epoch(net: Net, data: SeedlingsData, epoch: int,
                   normalize: transforms.Normalize):
    net.eval()  # switch to eval mode before running validation
    validate_total = 0
    validate_right = 0
    for (validate_batch_index, validate_images,
         validate_labels) in data.generate_validate_data():
        validate_tensor = normalize(torch.from_numpy(validate_images))
        validate_batch_x = Variable(validate_tensor,
                                    volatile=True).cuda().float()
        validate_batch_y = Variable(torch.from_numpy(validate_labels),
                                    volatile=True).cuda().long()

        if net.model_name == 'resnet50+':
            prob, mask, _ = remove_background(validate_images)
            plant_area = np.sum(mask, (1, 2))
            avg_prob = np.divide(np.sum(prob * mask, (1, 2)),
                                 plant_area,
                                 out=np.zeros_like(plant_area).astype(
                                     np.float),
                                 where=plant_area != 0)
            avg_green = np.divide(
                np.sum(validate_images[:, 1, :, :] * mask, (1, 2)),
                plant_area,
                out=np.zeros_like(plant_area).astype(np.float),
                where=plant_area != 0)
            plant_area = np.reshape(plant_area, (data.batch_size, 1))
            plant_area = Variable(torch.from_numpy(plant_area)).cuda().float()

            avg_prob = np.reshape(avg_prob, (data.batch_size, 1))
            avg_prob = Variable(torch.from_numpy(avg_prob)).cuda().float()

            avg_green = np.reshape(avg_green, (data.batch_size, 1))
            avg_green = Variable(torch.from_numpy(avg_green)).cuda().float()

            validate_output = net(validate_batch_x, plant_area, avg_prob,
                                  avg_green)
        else:
            validate_output = net(validate_batch_x)

        _, predict_batch_y = torch.max(validate_output, 1)
        validate_total += validate_batch_y.size(0)
        validate_right += sum(predict_batch_y.data.cpu().numpy() ==
                              validate_batch_y.data.cpu().numpy())
        accuracy = validate_right / validate_total
        print("Epoch:{} ,validate_batch index:{}, validate_accuracy:{}".format(
            epoch, validate_batch_index, accuracy))

    accuracy = validate_right / float(validate_total)
    print("Epoch:{}, validate accuracy: {}".format(epoch, accuracy))
    return accuracy
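A rough sketch of how train_epoch and validate_epoch could be wired together into a full training run. SeedlingsData, Net, train_epoch, and validate_epoch are the project pieces shown above; the optimizer, learning rate, epoch count, data path, Net() constructor call, and checkpoint name are assumptions, not taken from the original code:

import torch
from torchvision import transforms


def run_training(epochs: int = 30):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    data = SeedlingsData()
    # Placeholder path -- point this at the resized training images.
    data.load(train_data_paths=["/path/to/resized/train"], test_data_paths=[])

    net = Net()  # assumed constructor; the project may build the model differently
    net.cuda()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    best_accuracy = 0.0
    for epoch in range(epochs):
        net.train()  # validate_epoch leaves the network in eval mode
        train_epoch(net, data, epoch, normalize, optimizer)
        accuracy = validate_epoch(net, data, epoch, normalize)
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            # Hypothetical checkpoint name/format.
            torch.save(net.state_dict(), "best_model.pth")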
Code Example #6
  parser.add_argument('-N', required=False, type=int, default=1000000000, 
                      help='if given, use that number of images (debugging)')
  args = parser.parse_args()


  with open(args.path_out, 'w') as f_out:
    for i, (image, lineparts) in enumerate(read_line(args.path_in, args.image_col)):
      if i >= args.N: break
      sys.stdout.write('%d: ' % i)

      if args.display:
        f = float(500) / max(image.shape[0], image.shape[1])
        image0 = cv2.resize(image, None, None, f, f)
        cv2.imshow('src', image0)

      image = remove_background(image)

      if image is None: 
        if args.display:
          # show dummy.
          cv2.imshow('image', np.ones(image0.shape[:2], dtype=np.uint8) * 128)
          cv2.imshow('mask',  np.ones(image0.shape[:2], dtype=np.uint8) * 128)

      else:
        if args.display:
          f = float(500) / max(image.shape[0], image.shape[1])
          image = cv2.resize(image, None, None, f, f)
          cv2.imshow('image', image[:,:,:3])
          cv2.imshow('mask', image[:,:,-1])

        # write
Code Example #7
import pickle
import ast
import numpy as np
from collections import Counter
from colormath.color_diff_matrix import delta_e_cie2000
from colormath.color_objects import LabColor
from remove_background import remove_background
from PIL import Image
#
# with open('lab-matrix.txt', 'r') as f:
#     data = [ast.literal_eval(line.strip()) for line in f.readlines()]
# lab_matrix = np.array(data)
# print(lab_matrix)
#
# with open('lab-colors.pk', 'rb') as f:
#     lab_color = pickle.load(f)
#
# color = LabColor(lab_l=69.34, lab_a=-0.88, lab_b=-52.57)
# lab_color_vector = np.array([color.lab_l, color.lab_a, color.lab_b])
#
# delta = delta_e_cie2000(lab_color_vector, lab_matrix)
# res = lab_color[np.argmin(delta)]

remove_background('./sample1.jpg', 'out.png')

image = Image.open('out.png')
image = np.array(image)
print(image)