def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        # Snapshot the first parameter by value; keeping a live reference
        # would always compare equal to itself after the update below.
        a = list(net.parameters())[0].data.clone()

        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        b = list(net.parameters())[0].data
        # Prints False once optimizer.step() has actually changed the weights.
        print(torch.equal(a, b))
        keyboard()

        train_loss += loss.item()  # loss.data[0] is the legacy pre-0.4 accessor
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        progress_bar(
            batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss /
             (batch_idx + 1), 100. * correct / total, correct, total))
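Every snippet on this page calls keyboard() as a breakpoint, but its definition is never shown. A minimal stand-in, assuming it is a thin wrapper around an interactive debugger in the spirit of MATLAB's keyboard:

def keyboard():
    # hypothetical helper: pause execution and open an interactive debugger
    import pdb
    pdb.set_trace()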
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        keyboard()
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
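The TensorFlow snippet below uses `key` and `image_file` without defining them. A minimal TF1 queue setup that would produce them, sketched under the assumption that the images live at data/<label>/<name>.png (the glob pattern is not part of the original):

import tensorflow as tf

# Match PNG files and read them with the TF1 queue-runner API that the
# session block below expects.
filename_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once('data/*/*.png'))
reader = tf.WholeFileReader()
key, image_file = reader.read(filename_queue)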
S = tf.string_split([key], '/')
length = tf.cast(S.dense_shape[1], tf.int32)
# The label is the second-to-last path component (e.g. data/<label>/<file>.png);
# adjust the constant below if your directory layout nests differently.
label = S.values[length - tf.constant(2, dtype=tf.int32)]
#keyboard()
#label = tf.string_to_number(label,out_type=tf.int32)
image = tf.image.decode_png(image_file)

# Start a new session to show example output.
with tf.Session() as sess:
    # Required to get the filename matching to run.
    #tf.initialize_all_variables().run()
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    # Coordinate the loading of image files.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(6):
        # Get an image tensor and print its value.
        key_val, label_val, image_tensor = sess.run([key, label, image])
        print(image_tensor.shape)
        print(key_val)
        print(label_val)
        keyboard()

    # Finish off the filename queue coordinator.
    coord.request_stop()
    coord.join(threads)
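Under TensorFlow 2 the queue-runner pattern above was removed; the same pipeline is usually written with tf.data. A rough equivalent, sketched with the same assumed data/<label>/<name>.png layout:

import tensorflow as tf

def parse(path):
    # the label is the second-to-last path component, as in the TF1 version
    label = tf.strings.split(path, '/')[-2]
    image = tf.io.decode_png(tf.io.read_file(path))
    return path, label, image

dataset = tf.data.Dataset.list_files('data/*/*.png').map(parse)
for key_val, label_val, image_tensor in dataset.take(6):
    print(image_tensor.shape, key_val.numpy(), label_val.numpy())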
Example #4
def Matching():
    args = getArgs()
    start = time.time()

    category_dirs = sorted(os.listdir(args.data_dir))
    """
    des = [] #Features
    labels=[] #label
    #Create Label
    volumes = []
    # We will redo this part in pandas
    
    for category_dir in category_dirs:
        if os.path.isdir(os.path.join(args.data_dir,category_dir)):
            label = category_dir
            for file in os.listdir(os.path.join(args.data_dir, category_dir)):
                if file.endswith('bbox.yml'):
                    fs = cv2.FileStorage(os.path.join(args.data_dir,category_dir,file),0)

                    vector = fs.getNode("bbox").mat()[0].tolist()
                    vector_normalized = []
                    for value in vector:
                        vector_normalized.append(remap(value,0.45,0,0.9,0.1))
                    volume = vector[0]*vector[1]*vector[2]
                    volume_normalized = remap_volume(volume,0.01,0,0.9,0.1)
                    vector_normalized.append(volume_normalized)
                    #volumes.append(volume)
                    labels.append(label)
                    #print("ClassName=%s" % (label))
                    des.append(vector_normalized)
    """

    # Read in the data
    des=[]
    used_category=[]
    labels=[]
    for file in os.listdir(args.data_dir):
        if file.endswith(".csv"):
            my_dataframe = pd.read_csv(os.path.join(args.data_dir,file))
            category_tmp = my_dataframe["item name"][0]
            if category_tmp not in used_category:
                used_category.append(category_tmp)
            x_value = remap(my_dataframe[" x"][0])
            y_value = remap(my_dataframe[" y"][0])
            z_value = remap(my_dataframe[" z"][0])
            xyz = [x_value,y_value,z_value]
            xyz = sorted(xyz)   
            des.append(xyz)

            labels.append(category_tmp)

    print('start Grid Search')
    search_list = [29.5]
    tuned_parameters = [{'C': search_list}]

    linear_svc = LinearSVC()  # linear SVM model
    gscv = GridSearchCV(linear_svc, tuned_parameters, cv=5)  # grid search over C
    print('gscv.fit')
    gscv.fit(des, labels)  # parameter tuning: finds the best C automatically

    svm_best = gscv.best_estimator_  # estimator with the best tuned parameter
    print('start re-learning SVM with best parameter set.')

    # CalibratedClassifierCV wraps the SVM to add probabilistic output;
    # method='sigmoid' uses Platt scaling (see the docs for other methods).
    print('searched result of C =', svm_best.C)
    best_C_for_record = svm_best.C

    svm_best = CalibratedClassifierCV(svm_best, method='sigmoid', cv=3)
    svm_best.fit(des, labels)  # training

    #Save Model
    joblib.dump(svm_best, './bbox_svm_parameter.pkl', compress=9)

    #Load Model
    svm_best = joblib.load('./bbox_svm_parameter.pkl')

    ####load test image    (load test data)
    #label, test_image = LoadFile("crop_burts2.png", "/root/catkin_ws/src/tnp/tnp_svm/script/data/RealData")
    correct_count = 0
    for n in range(len(des)):
        des_test = des[n]

        ###### SVM recognition
        classes = svm_best.classes_  # class labels as stored in the fitted model

        # Forward predict; sklearn expects a 2-D array, hence the brackets
        testResponse = svm_best.predict([des_test])
        #print("testResponse=" + str(testResponse[0]))

        # print(classes)
        confidences_matrix = svm_best.predict_proba([des_test])
        # print(confidences_matrix)
        labels_class = svm_best.classes_  # classes_ is sorted by the estimator,
                                          # so its order may differ from `labels`
        #print("Real: " + str(labels[n]))
        sort_ascend = np.sort(confidences_matrix)  # probabilities, ascending
        # print(sort_ascend)
        sort_index_ascend = np.argsort(confidences_matrix)  # matching class indices
        # print(sort_index_ascend)
        sort_descend = sort_ascend[0][::-1]  # probabilities, best first
        #print("sort_descend", sort_descend)
        sort_index_descend = sort_index_ascend[0][::-1]  # class indices, best first
        # print(sort_index_descend)
        items_ids = []
        for i in range(0, len(labels_class)):
            items_ids.append(labels_class[sort_index_descend[i]])
        #print(items_ids)
        if testResponse[0] == labels[n]:
            correct_count += 1

    print("Success Rate: " + str(float(correct_count) / float(len(des))))
    keyboard()
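LinearSVC has no predict_proba of its own; the CalibratedClassifierCV wrapper is what supplies the probabilities ranked above. The pattern in isolation, on toy data (illustrative only, not the example's dataset):

from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=200, n_features=4, random_state=0)
clf = CalibratedClassifierCV(LinearSVC(), method='sigmoid', cv=3)  # Platt scaling
clf.fit(X, y)
print(clf.predict_proba(X[:1]))  # calibrated per-class probabilities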
Example #5
def main():
    # setup data directory
    root_dir = dirname(realpath(__file__))
    data_path = join(root_dir, '..', '..', 'data', 'amazon')

    # load the amazon csv
    with open(join(data_path, 'amazon.csv'), 'r', encoding='latin1') as f:
        amazon_df = pd.read_csv(f)
    amazon_df.rename(columns={'Unnamed: 0': 'pid'}, inplace=True)

    # include additional columns in amazon csv
    amazon_df = amazon_df.set_index('pid')
    amazon_df['related_title'] = ''
    amazon_df['related_description'] = ''

    # load the related text file
    with open(join(data_path, 'related.txt'), 'r', encoding='latin1') as f:
        related_text = f.read()
        related_lines = related_text.split('\n')

    # obtain product ids
    valid_df = amazon_df[amazon_df['title'].notna()
                         & amazon_df['description'].notna()]
    pids = set(valid_df.index.values)  # set for O(1) membership tests below
    titles = valid_df['title'].to_dict()
    descriptions = valid_df['description'].to_dict()

    related_pids = {}
    related_titles = {}
    related_descriptions = {}

    # loop over the rows and assign values
    counter = 0
    for line in related_lines:
        # process string input; skip blank or malformed lines
        # (e.g. the empty string left by the trailing newline)
        if ' also purchased ' not in line:
            continue

        # obtain pid and related items
        pid, related_str = line.split(' also purchased ', 1)
        related = [
            rel_pid for rel_pid in related_str.split(' ')
            if rel_pid != 'rights_details'
        ]

        # loop over related items
        for rel_pid in related:
            if rel_pid in pids:
                related_pids[pid] = rel_pid
                related_titles[pid] = titles[rel_pid]
                related_descriptions[pid] = descriptions[rel_pid]
                break

        # counter analysis
        counter += 1
        if counter % 100 == 0:
            print(f'Counter: {counter}')

    keyboard()

    # copy the collected related info into the dataframe; without this the
    # two columns added above would be written out empty
    amazon_df['related_title'] = pd.Series(related_titles).reindex(
        amazon_df.index, fill_value='')
    amazon_df['related_description'] = pd.Series(related_descriptions).reindex(
        amazon_df.index, fill_value='')

    # write to amazon csv
    with open(join(data_path, 'amazon_full.csv'), 'w', encoding='latin1') as f:
        amazon_df.to_csv(f)
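For reference, the line format the parser above expects looks like this (the product ids are hypothetical):

line = 'B000001 also purchased B000002 B000003 rights_details'
pid, rest = line.split(' also purchased ', 1)
related = [r for r in rest.split(' ') if r != 'rights_details']
print(pid, related)  # B000001 ['B000002', 'B000003']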
def Matching():
    args = getArgs()
    start = time.time()

    category_dirs = sorted(os.listdir(args.data_dir_bbox))
    weight_files = os.listdir(args.data_dir_weight)
    modelname = args.modelname

    # Read in the data
    des = []
    used_category = []
    labels = []
    names = [
        'assured_face_mask', 'assured_lavender_epsom_salts',
        'betty_crocker_measuring_spoons', 'bobbins_with_ring',
        'casemate_black_binder', 'equate_flushable_wipes',
        'five_star_red_hinged_note_cards', 'green_treat_bags', 'jute_twine',
        'mini_solo_cups', 'orange_balloons', 'pink_flamingo_cup',
        'r2_precision_pens', 'salad_tongs', 'scrub_buddies',
        'star_wars_bubbles'
    ]
    for file in os.listdir(args.data_dir_bbox):
        if file.endswith(".csv"):

            bbox_dataframe = pd.read_csv(os.path.join(args.data_dir_bbox,
                                                      file))
            category_tmp = bbox_dataframe["item name"][0]
            # print(category_tmp)
            if category_tmp not in used_category:
                used_category.append(category_tmp)
            weight = -1
            for weight_file in weight_files:
                # look for the weight file matching this category; if found,
                # add a noisy weight value and normalize it
                if category_tmp in weight_file:
                    weight_dataframe = pd.read_csv(
                        os.path.join(args.data_dir_weight, weight_file))
                    weight = float(weight_dataframe[" weight"][0])
                    weight = noisy_weight(weight)
                    weight_normalized = remap_weight(weight)
            if weight == -1:
                print("Error! Missing " + category_tmp + " weight file.")
                exit()

            x_value = remap_xyz(bbox_dataframe[" x"][0])
            y_value = remap_xyz(bbox_dataframe[" y"][0])
            z_value = remap_xyz(bbox_dataframe[" z"][0])
            volume = (bbox_dataframe[" x"][0] * bbox_dataframe[" y"][0]
                      * bbox_dataframe[" z"][0])
            volume_normalized = remap_volume(volume)
            xyz = [x_value, y_value, z_value]
            xyz_weight = sorted(xyz)
            xyz_weight.append(volume_normalized)
            xyz_weight.append(weight_normalized)
            # print(xyz_weight)

            # July 24, the max values were [.414, .383, .413] normalized to 0.45
            des.append(xyz_weight)

            labels.append(category_tmp)

    print('start Grid Search')
    search_list = (np.asarray(range(1, 300)) * 0.1).tolist()  # C in 0.1 .. 29.9
    tuned_parameters = [{'C': search_list}]

    linear_svc = LinearSVC()  # linear SVM model
    gscv = GridSearchCV(linear_svc, tuned_parameters, cv=5)  # grid search over C
    print('gscv.fit')
    gscv.fit(des, labels)  # parameter tuning: finds the best C automatically

    #print('svm_best')
    svm_best = gscv.best_estimator_  # estimator with the best tuned parameter
    print('start re-learning SVM with best parameter set.')

    # CalibratedClassifierCV wraps the SVM to add probabilistic output;
    # method='sigmoid' uses Platt scaling (see the docs for other methods).
    print('searched result of C =', svm_best.C)
    best_C_for_record = svm_best.C

    svm_best = CalibratedClassifierCV(svm_best, method='sigmoid', cv=3)
    svm_best.fit(des, labels)  # training

    #Save Model
    joblib.dump(svm_best, modelname, compress=9)

    #Load Model
    svm_best = joblib.load(modelname)

    ####load test image    (load test data)
    #label, test_image = LoadFile("crop_burts2.png", "/root/catkin_ws/src/tnp/tnp_svm/script/data/RealData")
    correct_count = 0
    for n in range(len(des)):
        des_test = des[n]

        ###### SVM recognition
        classes = svm_best.classes_  # class labels as stored in the fitted model

        # Forward predict; sklearn expects a 2-D array, hence the brackets
        testResponse = svm_best.predict([des_test])
        #print("testResponse=" + str(testResponse[0]))

        # print(classes)
        confidences_matrix = svm_best.predict_proba([des_test])
        # print(confidences_matrix)
        labels_class = svm_best.classes_  # classes_ is sorted by the estimator,
                                          # so its order may differ from `labels`
        #print("Real: " + str(labels[n]))
        sort_ascend = np.sort(confidences_matrix)  # probabilities, ascending
        # print(sort_ascend)
        sort_index_ascend = np.argsort(confidences_matrix)  # matching class indices
        # print(sort_index_ascend)
        sort_descend = sort_ascend[0][::-1]  # probabilities, best first
        #print("sort_descend", sort_descend)
        sort_index_descend = sort_index_ascend[0][::-1]  # class indices, best first
        # print(sort_index_descend)
        items_ids = []
        for i in range(0, len(labels_class)):
            items_ids.append(labels_class[sort_index_descend[i]])
        #print(items_ids)
        if testResponse[0] == labels[n]:
            correct_count += 1

    print("Success Rate: " + str(float(correct_count) / float(len(des))))
    keyboard()
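The descending-probability ranking used in both Matching() variants, shown in isolation:

import numpy as np

probs = np.array([[0.1, 0.6, 0.3]])  # one row, as returned by predict_proba
order = np.argsort(probs[0])[::-1]   # class indices, most probable first
print(order, probs[0][order])        # [1 2 0] [0.6 0.3 0.1]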
Example #7
def Matching():
    start = time.time()

    des = [] #Features
    labels=[] #label
    """
    #Create Label

    for category_dir in category_dirs:
        for rgb_image_path in glob.glob(os.path.join(args.data_dir, group_dir, category_dir)):
            for file in os.listdir(os.path.join(args.data_dir, group_dir, category_dir)):
                if file.endswith('.png'):
                    label, image = LoadFile(file, rgb_image_path)
                    des1 = ComputeHOGDescriptor(image)
                    labels.append(label)
                    print("ClassName=%s" % (label))
                    des.append(des1)
    """

    train_labels = ["hand_weight","windex","bag","meshcup","pen","Ramen"]
    #Create simulate data
    for _ in range(950):
        train_vectors = np.random.rand(3) #data simulation
        des.append(train_vectors)
        chose_label = random.choice(train_labels)
        print(chose_label)
        labels.append(chose_label)

    """
    print 'start Grid Search'
    #### for color
    # tuned_parameters = [{'C': [0.6, 0.7, 0.8, 0.9]}]
    ### 12 12 3 3 C=11.0  32crop C= 10.0
    # tuned_parameters = [{'C': [9.5, 10.0, 11.0]}]

    ### 8 8 1 1 paste on black C=0.5
     # tuned_parameters = [{'C': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}]

    ## 8 8 3 3 rotate C=5.0 32_paste:C=5.0 32_crop:C=5.0 crop:C=4.5
    tuned_parameters = [{'C': [4.5, 5.0, 5.5]}]

    ### 8 8 4 4 C=7.0
    # tuned_parameters = [{'C': [3.0, 4.0, 5.0, 6.0, 7.0, 8.0]}]

    ## 8 8 4 4 rotate:C=2.8 paste:C=2.8
    # tuned_parameters = [{'C': [2.5, 2.6, 2.7, 2.8]}]

    ### 9 9 3 3 rotate C=6.0
    # tuned_parameters = [{'C': [5.5, 5.7, 6.0, 6.2, 6.5]}]

    # tuned_parameters = [{'C': [0.1, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]}]
    # parameters = {'estimator__kernel':('linear', 'rbf'), 'estimator__C':[1, 10]}

    linear_svc = LinearSVC()#SMN Modoul
    gscv = GridSearchCV(linear_svc, tuned_parameters ,cv=5) #parameters grid search
    print 'gscv.fit'
    gscv.fit(des, labels)#Training parameters tuning (Find the best parameters automatically)

    #print 'svm_best'
    svm_best = gscv.best_estimator_#Choose the best tuned parameter
    print 'searched result of  C =', svm_best.C
    print 'start re-learning SVM with best parameter set.'
    """
    #### CalibratedClassifierCV wraps the SVM to add probabilistic output;
    #### method='sigmoid' uses Platt scaling (see the docs for other methods).

    svm_best = LinearSVC(C=4.0)
    svm_best = CalibratedClassifierCV(svm_best, method='sigmoid', cv=3)
    svm_best.fit(des, labels)  # training

    #Save Model
    joblib.dump(svm_best, './bbox_svm_parameter.pkl', compress=9)

    #Load Model
    svm_best = joblib.load('./bbox_svm_parameter.pkl')

    ####load test image    (load test data)
    #label, test_image = LoadFile("crop_burts2.png", "/root/catkin_ws/src/tnp/tnp_svm/script/data/RealData")
    des_test = des[0]

    ###### SVM recognition
    classes = svm_best.classes_  # class labels as stored in the fitted model

    # Forward predict; sklearn expects a 2-D array, hence the brackets
    testResponse = svm_best.predict([des_test])
    print("testResponse=", testResponse)

    # print(classes)
    confidences_matrix = svm_best.predict_proba([des_test])
    # print(confidences_matrix)
    labels = svm_best.classes_  # classes_ is sorted by the estimator, so its
                                # order may differ from the training labels
    sort_ascend = np.sort(confidences_matrix)  # probabilities, ascending
    # print(sort_ascend)
    sort_index_ascend = np.argsort(confidences_matrix)  # matching class indices
    # print(sort_index_ascend)
    sort_descend = sort_ascend[0][::-1]  # probabilities, best first
    print("sort_descend", sort_descend)
    sort_index_descend = sort_index_ascend[0][::-1]  # class indices, best first
    # print(sort_index_descend)
    items_ids = []
    for i in range(0, len(labels)):
        items_ids.append(labels[sort_index_descend[i]])
    print(items_ids)
    keyboard()
    i = 0
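The snippets above also call remap, remap_volume, remap_weight and noisy_weight without defining them. A plausible sketch of the linear-rescaling helper, with defaults inferred from the two call styles seen above (an assumption, not the original implementation):

def remap(value, in_max=0.45, in_min=0.0, out_max=0.9, out_min=0.1):
    # hypothetical helper: map [in_min, in_max] linearly onto [out_min, out_max]
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)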