# get the image VOI
                mri_itk_sagittal_roi = ImSeg.GetITKVOI(mri_itk_sagittal,
                                                       voi_size, voi_ixyz)
                mri_array = ITKImageHelper.itkImage_to_ndarray(
                    mri_itk_sagittal_roi)

                # rescale the MRI VOI image
                mri_roi_rescale_itk = ImSeg.ITKStandardizeImageIntensity(
                    mri_itk_sagittal_roi, img_norm_Nbin)

                # # visual checking of images and mask
                # ImSeg.display_overlay_volume(mri_array, ser_mask_array, 'DCE-MRI image VOI + SER mask VOI', aspect=ar)

                # create the image feature object with the feature list
                theimf = ImF.ImageFeature(mri_roi_rescale_itk, feature_list,
                                          ser_mask_sagittal_roi)

                for bb in glcm_Nbin:
                    print('glcm Nbin: {}'.format(bb))
                    theimf._compute_texture_features(Rneighbor, bb)
                    theimf._compute_first_order_stats()
                    theimf._compute_shape_size_features()

                    tmp_dict = theimf.feature_output
                    tmp_dict['pt_id'] = pt_id
                    tmp_dict['pt_mrn'] = pt_mrn
                    tmp_dict['pt_accession_num'] = pt_acc_num
                    tmp_dict['dce_series_dmi_fname'] = pt_dmi_list[ii]
                    tmp_dict['glcm_Nbin'] = bb
                    tmp_dict['organ_mask'] = 'breast tumor'
                    tmp_dict['process_name'] = 'GetImageFeature_VOI'
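
                    # A minimal sketch of the accumulation step this snippet is
                    # cut off before; assuming the same pattern as Example #6
                    # below, each per-bin feature dict would be appended to a
                    # patient-level pandas DataFrame:
                    # pt_features_data = pt_features_data.append(tmp_dict,
                    #                                            ignore_index=True)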
Example #2
    def __init__(self, folder_path, num_frames, num_channels):
        self.folder_path = folder_path
        self.num_frames = num_frames

        # initialize one list of tested dots per frame (frame indices start at 1)
        self.already_tested = [[] for _ in range(num_frames + 1)]

        # initialize the positive and negative sample datasets
        self.positive_dataset = list()
        self.negative_dataset = list()

        # initialize the occupied grid
        self.occupied_grid = np.zeros((512, 512), dtype='bool')

        # initialize the two async result objects
        self.res_plus = None
        self.res_minus = None

        # initialize the random forest classifier (non-default settings:
        # 300 trees, half the features per split, shallow depth)
        self.clf = ensemble.RandomForestClassifier(300,
                                                   max_features=0.5,
                                                   max_depth=2,
                                                   n_jobs=1)

        # initialize the probability map array
        self.probabilities = np.zeros((512, 512))

        # the frame sequence starts at index 1
        self.current_idx = 1

        # create the window
        self.root = Tk()
        self.root.title("SmartAnnotator")
        self.root.geometry('512x555')

        self.root.bind("<Left>", self._on_left)
        self.root.bind("<Right>", self._on_right)

        # create the refiner
        self.refiner = Ref.Refiner(self.root, self)
        self.refiner.withdraw()

        # take a random image for the MSER settings tab
        mser_image = self.get_image_from_idx(random.randint(1, num_frames - 1))

        # create the settings window
        self.settings = Sw.SettingWindow(self.root, MAX_NUM_FEATURES,
                                         self.num_frames, mser_image)
        self.settings.withdraw()

        # buttons
        button_paned_window = PanedWindow(orient=HORIZONTAL)
        button_paned_window.grid(row=0, column=0)

        self.settings_icon = ImageTk.PhotoImage(
            Image.open("icons/settings.png"))
        self.settings_button = Button(self.root,
                                      image=self.settings_icon,
                                      command=self.settings.deiconify)
        button_paned_window.add(self.settings_button)

        self.combobox_value = StringVar()
        combobox = ttk.Combobox(self.root,
                                textvariable=self.combobox_value,
                                state='readonly',
                                width=4)
        list_channels = ['ch' + str(i) for i in range(1, num_channels + 1)]
        combobox['values'] = tuple(['RGB'] + list_channels)
        combobox.current(0)
        combobox.bind("<<ComboboxSelected>>", self._new_combobox_selection)
        button_paned_window.add(combobox)

        train_forest_button = Button(self.root,
                                     text="Train",
                                     command=self.train_command)
        button_paned_window.add(train_forest_button)

        test_forest_button = Button(self.root,
                                    text="Test",
                                    command=self.test_command)
        button_paned_window.add(test_forest_button)

        # add the confidence slider
        self.slider = Scale(self.root,
                            from_=0.0,
                            to=1.0,
                            resolution=0.01,
                            orient=HORIZONTAL)
        self.slider.set(0.7)
        self.slider.bind("<ButtonRelease-1>", self.slider_command)
        button_paned_window.add(self.slider)

        self.overlay_button = IntVar()
        check_button = Checkbutton(self.root,
                                   text="",
                                   variable=self.overlay_button,
                                   command=self.overlay)
        button_paned_window.add(check_button)

        refine_button = Button(self.root,
                               text="Refine!",
                               command=self.refine_command)
        button_paned_window.add(refine_button)

        left_button = Button(self.root, text="<", command=self.left_command)
        button_paned_window.add(left_button)

        right_button = Button(self.root, text=">", command=self.right_command)
        button_paned_window.add(right_button)

        self.current_idx_entry = Entry(self.root, width=5, justify=RIGHT)
        self.current_idx_entry.bind("<Return>", self._return_on_entry)
        self.current_idx_entry.bind("<ButtonRelease-1>", self._focus_on_entry)
        self.current_idx_entry.insert(END, str(self.current_idx))
        button_paned_window.add(self.current_idx_entry)

        num_frames_label = Label(self.root, text='/' + str(self.num_frames))
        button_paned_window.add(num_frames_label)

        track_button = Button(self.root,
                              text="Track",
                              command=self.track_command)
        button_paned_window.add(track_button)

        # image label
        self.imgArray = self.get_image_from_idx(self.current_idx)
        self.current_image = Image.fromarray(self.imgArray)
        self.img = ImageTk.PhotoImage(self.current_image)

        img_label = Label(self.root, image=self.img)
        img_label.grid(row=1, column=0)

        # bind the click actions to the image label
        img_label.bind("<Button 1>", self.add_positive_sample)
        img_label.bind("<Button 2>", self.add_bunch_negative_samples)
        img_label.bind("<Button 3>", self.add_negative_sample_event)

        # create the feature object and initialize it
        self.image_feature = If.ImageFeature(self.settings.get_patch_size())
        self.image_feature.update_features(self.imgArray, self.current_idx,
                                           True)

        # initialize multiprocessing pool
        self.pool = mp.Pool(processes=2)

        # flags
        self.updated = False
        self.has_been_tested = False

        self.root.mainloop()
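
    # A minimal sketch of the frame loader used throughout this constructor;
    # the real get_image_from_idx is defined elsewhere in the class, and the
    # file naming scheme assumed below is hypothetical:
    # def get_image_from_idx(self, idx):
    #     path = os.path.join(self.folder_path, "{}.png".format(idx))
    #     return np.asarray(Image.open(path))  # H x W (x C) uint8 array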
Example #3
    def __init__(self, nHidden, seqLen):
        self.representation_score = {}
        self.y = tf.placeholder(tf.float32, shape=[None, 1])
        self.extractFeature = ExtractFeature.ExtractFeature()
        self.imageFeature = ImageFeature.ImageFeature()
        newNet = tf.reduce_mean(self.imageFeature.outputLS, axis=0)
        self.textFeature = TextFeature.TextFeature(
            nHidden, seqLen, self.extractFeature.finalState, newNet)
        self.l2_para = 1e-7
        with tf.variable_scope("training_variable"):
            # short aliases for the recurring dimensions; note that // (integer
            # division) keeps these shapes integers under Python 3
            imgSize = self.imageFeature.defaultFeatureSize
            embSize = self.extractFeature.embSize
            nH = self.textFeature.nHidden

            self.weights = {
                "MLP1": tf.Variable(tf.truncated_normal(
                    shape=[512, 256], stddev=0.08, name="MLP1_W")),
                "MLP2": tf.Variable(tf.truncated_normal(
                    shape=[256, 1], stddev=0.08, name="MLP2_W")),
                "ATT_attr1_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize + embSize, imgSize // 2 + embSize // 2],
                    stddev=0.08, name="ATT_attr1_1")),
                "ATT_attr1_2": tf.Variable(tf.truncated_normal(
                    shape=[nH * 2 + embSize, nH + embSize // 2],
                    stddev=0.08, name="ATT_attr1_2")),
                "ATT_attr1_3": tf.Variable(tf.truncated_normal(
                    shape=[2 * embSize, embSize], stddev=0.08, name="ATT_attr1_3")),
                "ATT_attr2_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize // 2 + embSize // 2, 1], stddev=0.08, name="ATT_attr2_1")),
                "ATT_attr2_2": tf.Variable(tf.truncated_normal(
                    shape=[nH + embSize // 2, 1], stddev=0.08, name="ATT_attr2_2")),
                "ATT_attr2_3": tf.Variable(tf.truncated_normal(
                    shape=[embSize, 1], stddev=0.08, name="ATT_attr2_3")),
                "ATT_img1_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize + nH * 2, imgSize // 2 + nH],
                    stddev=0.08, name="ATT_image1_1")),
                "ATT_img1_2": tf.Variable(tf.truncated_normal(
                    shape=[imgSize + embSize, imgSize // 2 + embSize // 2],
                    stddev=0.08, name="ATT_image1_2")),
                "ATT_img1_3": tf.Variable(tf.truncated_normal(
                    shape=[imgSize * 2, imgSize], stddev=0.08, name="ATT_image1_3")),
                "ATT_img2_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize // 2 + nH, 1], stddev=0.08, name="ATT_image2_1")),
                "ATT_img2_2": tf.Variable(tf.truncated_normal(
                    shape=[imgSize // 2 + embSize // 2, 1], stddev=0.08, name="ATT_image2_2")),
                "ATT_img2_3": tf.Variable(tf.truncated_normal(
                    shape=[imgSize, 1], stddev=0.08, name="ATT_image2_3")),
                "ATT_text1_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize + nH * 2, imgSize // 2 + nH],
                    stddev=0.08, name="ATT_text1_1")),
                "ATT_text1_2": tf.Variable(tf.truncated_normal(
                    shape=[nH * 2 + embSize, nH + embSize // 2],
                    stddev=0.08, name="ATT_text1_2")),
                "ATT_text1_3": tf.Variable(tf.truncated_normal(
                    shape=[nH * 4, nH * 2], stddev=0.08, name="ATT_text1_3")),
                "ATT_text2_1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize // 2 + nH, 1], stddev=0.08, name="ATT_text2_1")),
                "ATT_text2_2": tf.Variable(tf.truncated_normal(
                    shape=[nH + embSize // 2, 1], stddev=0.08, name="ATT_text2_2")),
                "ATT_text2_3": tf.Variable(tf.truncated_normal(
                    shape=[nH * 2, 1], stddev=0.08, name="ATT_text2_3")),
                "ATT_WI1": tf.Variable(tf.truncated_normal(
                    shape=[imgSize, 512], stddev=0.08, name="ATT_WI")),
                "ATT_WT1": tf.Variable(tf.truncated_normal(
                    shape=[2 * nHidden, 512], stddev=0.08, name="ATT_WT")),
                "ATT_WA1": tf.Variable(tf.truncated_normal(
                    shape=[200, 512], stddev=0.08, name="ATT_WA")),
                "ATT_WI2": tf.Variable(tf.truncated_normal(
                    shape=[imgSize, 512], stddev=0.08, name="ATT_WI2")),
                "ATT_WT2": tf.Variable(tf.truncated_normal(
                    shape=[2 * nHidden, 512], stddev=0.08, name="ATT_WT2")),
                "ATT_WA2": tf.Variable(tf.truncated_normal(
                    shape=[200, 512], stddev=0.08, name="ATT_WA2")),
                "ATT_WF_1": tf.Variable(tf.truncated_normal(
                    shape=[512, 1], stddev=0.08, name="ATT_WF_1")),
                "ATT_WF_2": tf.Variable(tf.truncated_normal(
                    shape=[512, 1], stddev=0.08, name="ATT_WF_2")),
                "ATT_WF_3": tf.Variable(tf.truncated_normal(
                    shape=[512, 1], stddev=0.08, name="ATT_WF_3")),
            }
            self.biases = {
                "MLP1": tf.Variable(tf.constant(
                    0.01, shape=[256], dtype=tf.float32, name="MLP1_b")),
                "MLP2": tf.Variable(tf.constant(
                    0.01, shape=[1], dtype=tf.float32, name="MLP2_b")),
                "ATT_attr1_1": tf.Variable(tf.constant(
                    0.01, shape=[imgSize // 2 + embSize // 2], name="ATT_attr1_1")),
                "ATT_attr1_2": tf.Variable(tf.constant(
                    0.01, shape=[nH + embSize // 2], name="ATT_attr1_2")),
                "ATT_attr1_3": tf.Variable(tf.constant(
                    0.01, shape=[embSize], name="ATT_attr1_3")),
                "ATT_attr2_1": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_attr2_1")),
                "ATT_attr2_2": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_attr2_2")),
                "ATT_attr2_3": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_attr2_3")),
                "ATT_img1_1": tf.Variable(tf.constant(
                    0.01, shape=[imgSize // 2 + nH], name="ATT_image1_1")),
                "ATT_img1_2": tf.Variable(tf.constant(
                    0.01, shape=[imgSize // 2 + embSize // 2], name="ATT_image1_2")),
                "ATT_img1_3": tf.Variable(tf.constant(
                    0.01, shape=[imgSize], name="ATT_image1_3")),
                "ATT_img2_1": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_image2_1")),
                "ATT_img2_2": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_image2_2")),
                "ATT_img2_3": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_image2_3")),
                "ATT_text1_1": tf.Variable(tf.constant(
                    0.01, shape=[imgSize // 2 + nH], name="ATT_text1_1")),
                "ATT_text1_2": tf.Variable(tf.constant(
                    0.01, shape=[nH + embSize // 2], name="ATT_text1_2")),
                "ATT_text1_3": tf.Variable(tf.constant(
                    0.01, shape=[nH * 2], name="ATT_text1_3")),
                "ATT_text2_1": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_text2_1")),
                "ATT_text2_2": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_text2_2")),
                "ATT_text2_3": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_text2_3")),
                "ATT_WW": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WW")),
                "ATT_WI": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WI")),
                "ATT_WT": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WT")),
                "ATT_WI1": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WI1")),
                "ATT_WT1": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WT1")),
                "ATT_WA": tf.Variable(tf.constant(0.01, shape=[512], name="ATT_WA")),
                "ATT_WF_1": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_WF_1")),
                "ATT_WF_2": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_WF_2")),
                "ATT_WF_3": tf.Variable(tf.constant(0.01, shape=[1], name="ATT_WF_3")),
            }
     print("newnet dimension :", newNet)
     
     imageVec = self.Attention(newNet, self.imageFeature.outputLS, self.textFeature.RNNState, self.extractFeature.finalState, "ATT_img1", "ATT_img2", 196, True)
     textVec = self.Attention(self.textFeature.RNNState, self.textFeature.outputs, newNet, self.extractFeature.finalState, "ATT_text1", "ATT_text2", self.textFeature.seqLen, False)
     attrVec = self.Attention(self.extractFeature.finalState, self.extractFeature.inputEmb, newNet, self.textFeature.RNNState, "ATT_attr1", "ATT_attr2", 5, False)
     
     attHidden = tf.tanh(tf.matmul(imageVec, self.weights["ATT_WI1"])+self.biases["ATT_WI1"])
     attHidden2 = tf.tanh(tf.matmul(textVec, self.weights["ATT_WT1"])+self.biases["ATT_WT1"])
     attHidden3 = tf.tanh(tf.matmul(attrVec, self.weights["ATT_WA1"])+self.biases["ATT_WW"])
     scores1 = tf.matmul(attHidden, self.weights["ATT_WF_1"])+self.biases["ATT_WF_1"]
     scores2 = tf.matmul(attHidden2, self.weights["ATT_WF_2"])+self.biases["ATT_WF_2"]
     scores3 = tf.matmul(attHidden3, self.weights["ATT_WF_3"])+self.biases["ATT_WF_3"]
     scoreLS = [scores1, scores2, scores3]
     scoreLS = tf.nn.softmax(scoreLS, dim=0)
     imageVec = tf.tanh(tf.matmul(imageVec, self.weights["ATT_WI2"])+self.biases["ATT_WI"])
     textVec = tf.tanh(tf.matmul(textVec, self.weights["ATT_WT2"])+self.biases["ATT_WT"])
     attrVec = tf.tanh(tf.matmul(attrVec, self.weights["ATT_WA2"])+self.biases["ATT_WA"])
     self.concatInput = scoreLS[0]*imageVec+scoreLS[1]*textVec+scoreLS[2]*attrVec
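
        # self.Attention is defined elsewhere in this class. A minimal sketch
        # of the additive attention its paired "*1_*"/"*2_*" weight shapes
        # imply (an assumption, not the original code): for a query q and
        # candidate vectors c_i,
        #     e_i = tanh([c_i ; q] . W1 + b1) . W2 + b2
        #     a   = softmax(e)
        #     out = sum_i a_i * c_i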
Example #4
    # print ig_tumor.samplingRCS
    # print ig_mri.direction_cosine_matrix

    # # visualize the volume
    # ImP.ITK_Image_OverlayPlot(itk_img_orient,itk_tumor_orient,'MAMMIPET patient {} images, Tumor #{}'.format(img_type,ii))

    # get image feature
    feature_list = [
        'autocorrelation', 'cluster_prominence', 'cluster_shade',
        'cluster_tendency', 'contrast', 'correlation', 'diff_entropy',
        'dissimilarity', 'energy', 'entropy', 'homogeneity1', 'homogeneity2',
        'idmn', 'idn', 'inv_var', 'maxprob', 'sum_avg', 'sum_entropy',
        'sum_var'
    ]
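
    # (all of the above are GLCM texture statistics; first-order statistics
    # and shape/size features are computed by the separate calls below)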

    theimf = ImF.ImageFeature(itk_img_orient, feature_list, itk_tumor_orient)

    pt_features_data = pd.DataFrame()
    for aa in img_norm_Nbin:
        for bb in glcm_Nbin:
            print('img_Nbin: {}, glcm Nbin: {}'.format(aa, bb))
            theimf._compute_texture_features(Rneighbor,
                                             GSnorm_Nbin=aa,
                                             glcm_Nbin=bb)
            theimf._compute_first_order_stats()
            theimf._compute_shape_size_features()

            tmp_df = theimf.feature_output
            tmp_df['glcm_Nbin'] = bb
            tmp_df['img_norm_Nbin'] = aa
            tmp_df['organ_mask'] = 'breast tumor'
Example #5
        text_score = self.text_linear_2(text_hidden)
        attribute_score = self.attribute_linear_2(attribute_hidden)
        score = torch.nn.functional.softmax(
            torch.stack([image_score, text_score, attribute_score]), dim=0)
        image_vector = torch.tanh(self.image_linear_3(image_vector))
        text_vector = torch.tanh(self.text_linear_3(text_vector))
        attribute_vector = torch.tanh(
            self.attribute_linear_3(attribute_vector))
        # final fuse
        output = (score[0] * image_vector + score[1] * text_vector +
                  score[2] * attribute_vector)
        return output


if __name__ == "__main__":
    image = ImageFeature.ExtractImageFeature()
    text = TextFeature.ExtractTextFeature(LoadData.TEXT_LENGTH,
                                          LoadData.TEXT_HIDDEN)
    attribute = AttributeFeature.ExtractAttributeFeature()
    fuse = ModalityFusion()
    for (text_index, image_feature, attribute_index, group,
         sample_id) in LoadData.train_loader:
        image_result, image_seq = image(image_feature)
        text_result, text_seq = text(text_index)
        attribute_result, attribute_seq = attribute(attribute_index)
        result = fuse(image_result, image_seq, text_result,
                      text_seq.permute(1, 0, 2), attribute_result,
                      attribute_seq.permute(1, 0, 2))
        print(result.shape)

        break
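
# A minimal sketch of the layers the forward pass above assumes; the real
# ModalityFusion defines them in __init__, and the sizes here are
# placeholders rather than the original values:
#     self.text_linear_2 = nn.Linear(hidden_size, 1)       # text score head
#     self.attribute_linear_2 = nn.Linear(attr_size, 1)    # attribute score head
#     self.image_linear_3 = nn.Linear(image_size, fused_size)
#     self.text_linear_3 = nn.Linear(hidden_size, fused_size)
#     self.attribute_linear_3 = nn.Linear(attr_size, fused_size)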
Example #6
    ]
    test = [pt_dmi_list[1]]
    for ff in test:
        dceSeries = dmi.DMI(ff)
        mri_itk_img = ITKImageHelper.generate_oriented_itkImage(dceSeries)

        # figure out the patient MRN
        tag_mrn = dicom.tag.Tag(0x0010, 0x0020)
        pt_mrn = dceSeries._DS[tag_mrn].value

        # the tumor mask generated by Shuang's method
        mask_fname = '{}/TumorMask_shuang.nrrd'.format(pt_series_dir)
        mask_itk_img = ITKImageHelper.itkImage_read(mask_fname)

        # create the image feature object with the feature list
        theimf = ImF.ImageFeature(mri_itk_img, feature_list, mask_itk_img)
        # theimf = ImF.ImageFeature(mri_itk_img,feature_list)
        Rneighbor = 1
        theimf._compute_texture_features(Rneighbor)
        print('texture feature complete!')
        theimf._compute_first_order_stats()
        print('first order stats complete!')
        theimf._compute_shape_size_features()
        print('shape size feature complete!')

        tmp_dict = theimf.feature_output
        tmp_dict['pt_id'] = pt_id
        tmp_dict['pt_mrn'] = pt_mrn
        tmp_dict['dce_series_dmi_fname'] = ff

        pt_features_data = pt_features_data.append(tmp_dict, ignore_index=True)
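
        # Note: DataFrame.append was removed in pandas 2.0. A minimal sketch of
        # the same accumulation on current pandas (rows being a plain list
        # filled inside the loop):
        #     rows.append(tmp_dict)
        #     pt_features_data = pd.DataFrame(rows)  # once, after the loop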
Example #7
            check_resample = (np.array(IG_pet.samplingRCS) > the_img_spacingRCS).all()
            if check_resample:
                print('PET image voxel spacing is coarser than the desired spacing, {}'.format(IG_pet.samplingRCS))
                final_itk_mask = itkif.ITKImageResample(mask_itk, the_img_spacingRCS, is_mask=True)
                final_itk_img = itkif.ITKImageResample(petct_suv_itk, the_img_spacingRCS, is_mask=False)
            else:
                print('the image voxel spacing already matches the desired spacing; no need to resample!')
                final_itk_img = petct_suv_itk
                final_itk_mask = mask_itk
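
            # ITKImageResample is a project-specific helper. A minimal
            # SimpleITK sketch of the same operation (an assumed equivalent,
            # not this project's code): linear interpolation for the image,
            # nearest-neighbor for the mask so label values are preserved:
            # import SimpleITK as sitk
            # def resample(img, new_spacing, is_mask):
            #     interp = sitk.sitkNearestNeighbor if is_mask else sitk.sitkLinear
            #     size = [int(round(sz * sp / ns)) for sz, sp, ns in
            #             zip(img.GetSize(), img.GetSpacing(), new_spacing)]
            #     return sitk.Resample(img, size, sitk.Transform(), interp,
            #                          img.GetOrigin(), new_spacing,
            #                          img.GetDirection(), 0, img.GetPixelID())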

            if is_vis_check == 1:
                # visual checking of the final processed PET images and the tumor mask
                ImP.ITK_Image_OverlayPlot(final_itk_img, final_itk_mask, 'Post Resampling + SUV PET image + Tumor mask')

            # create the image feature object with the feature list
            theimf = ImF.ImageFeature(final_itk_img, feature_list, final_itk_mask)

            pt_features_data = pd.DataFrame()
            for aa in img_norm_Nbin:
                for bb in glcm_Nbin:
                    print('img norm Nbin: {}, glcm Nbin: {}'.format(aa, bb))
                    theimf._compute_texture_features(Rneighbor, GSnorm_Nbin=aa, glcm_Nbin=bb)
                    theimf._compute_first_order_stats()
                    theimf._compute_shape_size_features()

                    tmp_dict = theimf.feature_output
                    tmp_dict['pt_id'] = pt_id
                    tmp_dict['pt_mrn'] = pt_mrn
                    tmp_dict['pt_accession_num'] = pt_acc_num
                    tmp_dict['pet_series_fdir'] = pet_fdir
                    tmp_dict['glcm_Nbin'] = bb
Example #8
    return rgb, inputImg.shape[0], inputImg.shape[1]


#------------------------------------------------------------------------
# Path of the image set to read (e.g. outimg/ACE2/strawberry)
inputImgPath = sys.argv[1]

# Load the trained parameters
intercept = np.load("LogisticRegresion/intercept.npy")
coef = np.load("LogisticRegresion/coef.npy")

# Load the fitted feature scaler
# (note: sklearn.externals.joblib was removed in scikit-learn 0.23; on a
#  modern install, import joblib directly and call joblib.load)
scaler = sklearn.externals.joblib.load("LogisticRegresion/FeatureScaler.pkl")

# Instance used to extract image features
imageFeature = ImageFeature.ImageFeature()

features = np.empty((0, coef.shape[0]))

#initRGB = getImageRGBFromPath("img/All/" + fileName + ".jpg")
initRGB = []
for it in range(100):
    rgb, imgH, imgW = getImageRGBFromPath(inputImgPath + "_" + str(it) +
                                          ".jpg")

    # Extract the feature vector
    feature = imageFeature.getImageFeatureFromRGB(rgb, imgH, imgW, initRGB)
    feature[np.isnan(feature)] = 0

    features = np.r_[features, feature]
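
# A minimal sketch of how the loaded parameters could be applied once all
# features are collected; the classification step itself is not part of this
# snippet:
# scaled = scaler.transform(features)
# probs = 1.0 / (1.0 + np.exp(-(scaled @ coef + intercept)))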