Example #1
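These snippets assume the imports below. `utils`, `hit`, and `facedetect` are modules local to the original project (their code is not shown here); everything else comes from the standard library and Pillow:

import argparse
import colorsys
from statistics import mean  # Python 3.4+; numpy.mean works as well

from PIL import Image

import utils       # project-local: crawl_image_from_url, load_file
import hit         # project-local: regex_datasource
import facedetect  # project-local: load_cascade, detect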
def calculat_hsv_model(part_of_data, image_temp_dir):

    content = []
    for line in part_of_data:
        # Records are either comma- or tab-separated; fall back to tabs
        # when the line contains no commas.
        line_fields = line.split(',')
        if len(line_fields) == 1:
            line_fields = line.split('\t')

        # The first field is the image URL; download it to a temp file.
        image_url = line_fields[0]
        utils.crawl_image_from_url(image_url, image_temp_dir + '/temp.jpg')
        # Convert to RGB so every pixel is a 3-tuple, regardless of the
        # source mode (grayscale, palette, RGBA, ...).
        im = Image.open(image_temp_dir + '/temp.jpg').convert('RGB')
        pix = im.load()

        hue = []
        saturation = []
        value = []
        image_width, image_height = im.size
        for index_x in range(image_width):
            for index_y in range(image_height):
                red, green, blue = pix[index_x, index_y]

                # colorsys expects channels normalized to [0, 1].
                h, s, v = colorsys.rgb_to_hsv(red / 255.0,
                                              green / 255.0,
                                              blue / 255.0)
                hue.append(h)
                saturation.append(s)
                value.append(v)

        mean_hue = mean(hue)
        mean_saturation = mean(saturation)
        mean_value = mean(value)

        # Append the three mean-HSV features to the original record.
        line_fields.append(str(mean_hue))
        line_fields.append(str(mean_saturation))
        line_fields.append(str(mean_value))

        content.append(",".join(line_fields))

    return content
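Looping over pixels in pure Python is slow for large images. A roughly equivalent, much faster variant (a sketch; the helper name mean_hsv is hypothetical, and it assumes Pillow and NumPy) converts the whole image to Pillow's HSV mode and averages each channel. Note that Pillow stores HSV channels as 0-255 integers rather than the 0-1 floats colorsys returns, so the means are rescaled:

import numpy as np
from PIL import Image

def mean_hsv(image_path):
    """Return the mean (h, s, v) of an image, each scaled to [0, 1]."""
    im = Image.open(image_path).convert('RGB').convert('HSV')
    # Shape (height, width, 3); channels are 8-bit H, S, V.
    channels = np.asarray(im, dtype=np.float64) / 255.0
    return tuple(channels.reshape(-1, 3).mean(axis=0))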
def main():
    parser = argparse.ArgumentParser(
        description='Analyze HIT results submitted by Amazon Mechanical Turk workers.')
    parser.add_argument('-f', help='The MTurk data source file.')
    parser.add_argument('-d', help='The directory to save crawled images into.')

    args = parser.parse_args()

    if args.f is not None:
        file_urls = utils.load_file(args.f)

        data_metainfo = hit.regex_datasource(file_urls)

        # data_labels: flickr high interesting 1, flickr low interesting 2,
        #              pinterest [3, 4, 5]
        data_labels = data_metainfo[0]
        # data_ids: (flickr, pinterest) image id
        data_ids = data_metainfo[1]

        # Download every image, naming it after its dataset id.
        for count, url in enumerate(file_urls):
            utils.crawl_image_from_url(url, args.d + '/' + data_ids[count] + '.jpg')
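The snippet does not show how main() is invoked; a conventional entry point, with a hypothetical script name for illustration:

if __name__ == '__main__':
    main()

# e.g.: python analyze_hits.py -f mturk_results.csv -d ./images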
Example #4
def calculat_facedetection_model(part_of_data, image_temp_dir,
                                 cascade_filename):

    cascade = facedetect.load_cascade(cascade_filename)

    content = []
    for line in part_of_data:
        # Records are either comma- or tab-separated.
        line_fields = line.split(',')
        if len(line_fields) == 1:
            line_fields = line.split('\t')

        # Download the image and run the face detector on it.
        image_url = line_fields[0]
        utils.crawl_image_from_url(image_url, image_temp_dir + '/temp.jpg')

        level = facedetect.detect(image_temp_dir + '/temp.jpg', cascade)
        print(level)

        # Append the detection result as a new feature column.
        line_fields.append(str(level))

        content.append(",".join(line_fields))

    return content
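facedetect is a project-local wrapper whose internals are not shown. A minimal sketch of what load_cascade and detect might wrap, using OpenCV's CascadeClassifier (an assumption about the wrapper, not the project's actual code; "level" is interpreted here as a face count):

import cv2

def load_cascade(cascade_filename):
    # e.g. an OpenCV-bundled file such as haarcascade_frontalface_default.xml
    return cv2.CascadeClassifier(cascade_filename)

def detect(image_path, cascade):
    """Return the number of faces detected in the image."""
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    return len(faces)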