Example no. 1
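The helper get_model_instance_segmentation is defined elsewhere in the project and is not shown in this example. A minimal sketch of what such a helper typically looks like, assuming it follows the standard TorchVision Mask R-CNN fine-tuning recipe (an illustration, not the project's actual code):

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

def get_model_instance_segmentation(num_classes):
    # Start from a COCO-pretrained Mask R-CNN (older torchvision: pretrained=True)
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights="DEFAULT")
    # Replace the box head so it predicts `num_classes` classes (background included)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    # Replace the mask head accordingly
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, 256, num_classes)
    return model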
import numpy as np
import torch
from PIL import Image, ImageChops

# Build the model for the dataset's number of classes and load the trained weights
model = get_model_instance_segmentation(chanel_data.number_of_classes())
print("loading model", cnf.model_name)
model.load_state_dict(torch.load(cnf.path2model + cnf.model_name, map_location=device))
model.to(device)
model.eval()


cnf.num_tsts = 1000  # limit on the number of test images to process


for i, item in enumerate(chanel_data):
    if i >= cnf.num_tsts:
        break
    image = item[0]
    masks, labels = instance_segmentation_api(model, image, device, chanel_data.class_names,
                                              threshold=0.7, rect_th=1, text_size=1, text_th=3)
    # Multiply the image by each binary mask to isolate the pixels of every detected instance
    masked_img = []
    for jj in range(len(labels)):
        masks[jj] = masks[jj].astype('uint8')
        img = ImageChops.multiply(image, Image.fromarray(255 * masks[jj]).convert('RGB'))  # img.show()
        masked_img.append(np.array(img, dtype='uint8'))

    # Extract the dominant clothing colors of the detected person and plot them as a pie chart
    one_person_clothing_colors = ColorExtractor(masks, labels, masked_img, cnf,
                                                image_name=item[1][-10:-1])
    one_person_clothing_colors.pie_chart(image, fname=item[1][-10:-1], figure_size=(4, 4))

    # color_table_obj = ColorTable(dataset.class_names, cnf)
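instance_segmentation_api is likewise defined outside this snippet. From the way it is called above, it appears to return one binary mask per detection above the score threshold plus the matching class names; a rough sketch under that assumption (the drawing parameters rect_th, text_size and text_th are accepted but not used here):

import torch
from torchvision.transforms.functional import to_tensor

def instance_segmentation_api(model, image, device, class_names,
                              threshold=0.7, rect_th=1, text_size=1, text_th=3):
    # Run the detector on a single PIL image and keep predictions above `threshold`
    with torch.no_grad():
        pred = model([to_tensor(image).to(device)])[0]
    keep = pred['scores'] > threshold
    # Soft masks have shape (N, 1, H, W); binarise them at 0.5
    masks = [(m[0] > 0.5).cpu().numpy() for m in pred['masks'][keep]]
    labels = [class_names[i] for i in pred['labels'][keep].cpu().tolist()]
    return masks, labels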