def get_to_items(browser: FireBrowser):
    browser.click('#pos', sleep_time=0)
    browser.click('#inventory')
    browser.click('button', '100', sleep_time=4)
    part_list = Parts()
    url = ""
    # keep paging while a 'Next' link exists and the URL actually changed
    while browser.check_selector('a', 'Next') and url != browser.url():
        el_list = browser.get_elements('.table tr')
        el_list.pop(0)  # drop the header row
        for el_item in el_list:
            info = browser.get_elements('td', el_item)
            # column layout of the inventory table:
            #   2: item_name   1: item_number   4: available   5: reserved
            #   6: intransit   9: avg_cost     11: avg_cons   10: reorder
            part_list.add_item_convert([
                info[2].text, info[1].text, info[4].text, info[5].text,
                info[6].text[-1:], info[9].text, info[11].text, info[10].text
            ])
            pyprint.write(part_list.last_item().save_string())
        browser.click('.next > a:nth-child(1)')
        url = browser.url()
    return part_list
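# A minimal usage sketch, not from the original code: the FireBrowser
# constructor, the get() navigation call, and the login URL below are all
# assumptions for illustration; only get_to_items and pyprint are from above.
if __name__ == '__main__':
    browser = FireBrowser()                       # assumed default constructor
    browser.get('https://pos.example.com/login')  # hypothetical inventory site
    items = get_to_items(browser)
    pyprint.write(f'scraped {len(items.list())} parts')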
def compute_estimated_part_data(model_name, shape, IDS, model_rf):
    net = caffe.Classifier(settings.model(model_name),
                           settings.pretrained(model_name),
                           mean=np.load(settings.ILSVRC_MEAN),
                           channel_swap=(2, 1, 0),
                           raw_scale=255)
    net.set_phase_test()
    net.set_mode_gpu()
    # compute estimated head data
    new_Xtest_part = np.zeros(shape)
    for i, t_id in enumerate(IDS):
        print i
        img = caffe.io.load_image(all_image_infos[t_id])
        dh.init_with_image(img)
        X = dh.features(dense_points)
        preds_prob = model_rf.predict_proba(X)
        max_prob = np.max(preds_prob[:, 1])
        preds_prob = preds_prob[:, 1].reshape((227, 227)).T
        # threshold at half the maximum foreground probability,
        # then clean the mask up morphologically
        preds = preds_prob >= (max_prob / 2)
        preds = skimage.morphology.closing(preds, skimage.morphology.square(10))
        preds = skimage.morphology.remove_small_objects(preds, min_size=10,
                                                        connectivity=1)
        # note: older skimage versions label background pixels as -1,
        # hence the mask below
        L, N = skimage.measure.label(preds, return_num=True, background=0)
        L_no_bg = L[L != -1].flatten()
        # take the most frequent connected component as the part region
        vals, counts = scipy.stats.mode(L_no_bg)
        part_label = int(vals[0])
        indices = np.where(L == part_label)
        xmin = indices[0].min()
        xmax = indices[0].max()
        ymin = indices[1].min()
        ymax = indices[1].max()
        pmin = Part(-1, '?', -1, xmin, ymin, 1)
        pmax = Part(-1, '?', -1, xmax, ymax, 1)
        rect_parts = Parts(parts=[pmin, pmax])
        rect_parts.denorm_for_size(img.shape[0], img.shape[1], size=227)
        rect_info = (rect_parts[0].x, rect_parts[1].x,
                     rect_parts[0].y, rect_parts[1].y)
        t_img_part = Parts().get_rect(img, rect_info=rect_info)
        try:
            net.predict([t_img_part], oversample=False)
        except Exception:
            print '------', t_id, '----------'
        new_Xtest_part[i, :] = net.blobs[feat_layer].data[0].flatten()
    return new_Xtest_part
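# Hedged usage sketch for the function above; not part of the original file.
# Assumes IDtest comes from cub.get_train_test_id(), model_rf is an
# already-fitted sklearn RandomForestClassifier, and feat_layer names a
# 4096-wide blob (e.g. 'fc7' in the reference CaffeNet) so the shape lines up.
Xtest_head_est = compute_estimated_part_data('ccpheadft-100000',
                                             (IDtest.shape[0], 4096),
                                             IDtest, model_rf)
print Xtest_head_est.shape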
def run(self):
    if self._args.device == '?':
        parts = Parts()
        parts = [part.getName() for part in parts.list()]
        parts.sort()
        print(parts)
        return 0
    try:
        part = Parts().getPartByName(self._args.device)
        if self._args.hardware is not None:
            hw = self._args.hardware
        else:
            hw = part.listHardware()
            if len(hw) != 1:
                raise PgmError(
                    "Cannot determine hardware, select one of: %s" % hw)
            hw = hw[0]
        io = self._getIOByHardwareName(hw)
        self._operations = Operations(part, io, self._args.sync)
        return self._doOperations()
    except PgmError as e:
        print(e)
        return 1
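# Hedged usage sketch, not from the original: run() reads only
# self._args.device, .hardware and .sync, so an argparse.Namespace stub on a
# stand-in class is enough to exercise the device-listing path. _Cmd is
# hypothetical; the real enclosing class is not shown above.
import argparse

class _Cmd(object):
    run = run   # reuse the method above for illustration

cmd = _Cmd()
cmd._args = argparse.Namespace(device='?', hardware=None, sync=False)
print(cmd.run())   # prints the sorted part names, then returns 0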
def config(self):
    """
    Create a Parts object from the specified parts
    and memoize it in self._partconfig[package].
    """
    package = self.package
    if not hasattr(self, '_partconfig'):
        self._partconfig = {}
    if package not in self._partconfig:
        self._partconfig[package] = Parts(package, *self.parts)
    return self._partconfig[package]
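# Hedged sketch of the memoization contract above; _Host is a hypothetical
# stand-in for the real class, which only needs .package and .parts set.
class _Host(object):
    config = config   # reuse the method above for illustration

h = _Host()
h.package, h.parts = 'mypkg', ['core', 'docs']
assert h.config() is h.config()   # the second call returns the cached Parts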
def main(storage_name, layer, model, iteration, normalize_feat, n_neighbors,
         parts, feat_layer, add_noise, to_oracle, noise_std_c, noise_std_d,
         augment_training, augmentation_fold, augmentation_noise):
    if len(parts) == 0:
        print 'no parts were needed'
        exit()

    name = '%s-%s' % (model, iteration)

    nn_storage_name = 'nn-parts'
    nn_storage = datastore(settings.storage(nn_storage_name))
    nn_storage.super_name = '%s_%s' % (storage_name, name)
    nn_storage.sub_name = layer
    nn_storage.instance_name = 'norm_%s.mat' % str(normalize_feat)
    nn_storage.instance_path = nn_storage.get_instance_path(
        nn_storage.super_name, nn_storage.sub_name, nn_storage.instance_name)

    cub = CUB_200_2011(settings.CUB_ROOT)

    safe = datastore(settings.storage(storage_name))
    safe.super_name = 'features'
    safe.sub_name = name
    instance_path = safe.get_instance_path(safe.super_name, safe.sub_name,
                                           'feat_cache_%s' % layer)
    feat = safe.load_large_instance(instance_path, 4)

    # should we normalize the feats?
    if normalize_feat:
        # snippet from: http://stackoverflow.com/a/8904762/428321
        # I've gone for l2 normalization.
        # row_sums = feat.sum(axis=1)
        row_norms = np.linalg.norm(feat, axis=1)
        feat = feat / row_norms[:, np.newaxis]

    IDtrain, IDtest = cub.get_train_test_id()
    # The following indexing is not really a good idea; it only works for this dataset.
    Xtrain = feat[IDtrain - 1, :]
    Xtest = feat[IDtest - 1, :]
    print 'init load done'

    if not nn_storage.check_exists(nn_storage.instance_path):
        print 'calculating'
        # the actual NN search
        nn_model = sklearn.neighbors.NearestNeighbors(
            n_neighbors=n_neighbors, algorithm='ball_tree',
            metric='minkowski', p=2)
        tic = time()
        nn_model.fit(Xtrain)
        toc = time() - tic
        print 'fitted in: ', toc
        tic = time()
        NNS = nn_model.kneighbors(Xtest, 1, return_distance=False)
        toc = time() - tic
        print 'found in: ', toc
        nn_storage.save_instance(nn_storage.instance_path, NNS)
    else:
        # load the NNS
        NNS = nn_storage.load_instance(nn_storage.instance_path)
        print 'loaded'

    # convert (N, 1) to (N,)
    NNS = NNS.T[0]

    # transfer part locations
    all_parts_cub = cub.get_parts()
    estimated_test_parts = Parts()
    all_image_infos = cub.get_all_image_infos()
    bbox = cub.get_bbox()

    tic = time()
    # estimate test parts with NN part transfer
    for i in range(IDtest.shape[0]):
        test_id = IDtest[i]
        nn_id = IDtrain[NNS[i]]
        nn_parts = all_parts_cub.for_image(nn_id)
        test_bbox = bbox[test_id - 1]
        nn_bbox = bbox[nn_id - 1]
        estimated_parts = nn_parts.transfer(nn_bbox, test_bbox)
        estimated_parts.set_for(test_id)
        estimated_test_parts.appends(estimated_parts)
    toc = time() - tic
    print 'transferred in', toc

    # load data
    tic = time()
    features_storage_r = datastore(settings.storage('ccrft'))
    feature_extractor_r = CNN_Features_CAFFE_REFERENCE(features_storage_r,
                                                       make_net=False)
    features_storage_c = datastore(settings.storage('cccft'))
    feature_extractor_c = CNN_Features_CAFFE_REFERENCE(features_storage_c,
                                                       make_net=False)
    if 'head' in parts:
        features_storage_p_h = datastore(settings.storage('ccpheadft-100000'))
        feature_extractor_p_h = CNN_Features_CAFFE_REFERENCE(
            features_storage_p_h, make_net=False)
    if 'body' in parts:
        features_storage_p_b = datastore(settings.storage('ccpbodyft-100000'))
        feature_extractor_p_b = CNN_Features_CAFFE_REFERENCE(
            features_storage_p_b, make_net=False)

    Xtrain_r, ytrain_r, Xtest_r, ytest_r = cub.get_train_test(
        feature_extractor_r.extract_one)
    Xtrain_c, ytrain_c, Xtest_c, ytest_c = cub.get_train_test(
        feature_extractor_c.extract_one)
    if 'head' in parts:
        Xtrain_p_h, ytrain_p_h, Xtest_p_h, ytest_p_h = cub.get_train_test(
            feature_extractor_p_h.extract_one)
    if 'body' in parts:
        Xtrain_p_b, ytrain_p_b, Xtest_p_b, ytest_p_b = cub.get_train_test(
            feature_extractor_p_b.extract_one)
    toc = time() - tic
    print 'loaded data in', toc

    def compute_estimated_part_data(model_name, shape, IDS,
                                    part_names_to_filter, add_noise,
                                    noise_std_c, noise_std_d):
        net = caffe.Classifier(settings.model(model_name),
                               settings.pretrained(model_name),
                               mean=np.load(settings.ILSVRC_MEAN),
                               channel_swap=(2, 1, 0),
                               raw_scale=255)
        net.set_phase_test()
        net.set_mode_gpu()
        # compute estimated head data
        new_Xtest_part = np.zeros(shape)
        for i, t_id in enumerate(IDS):
            # use ground-truth parts for the oracle, transferred parts otherwise
            if to_oracle:
                t_parts = all_parts_cub.for_image(t_id)
            else:
                t_parts = estimated_test_parts.for_image(t_id)
            t_img_addr = all_image_infos[t_id]
            t_img = caffe.io.load_image(t_img_addr)
            t_parts_part = t_parts.filter_by_name(part_names_to_filter)
            t_img_part = t_parts_part.get_rect(t_img, add_noise=add_noise,
                                               noise_std_c=noise_std_c,
                                               noise_std_d=noise_std_d)
            try:
                net.predict([t_img_part], oversample=False)
            except Exception, e:
                print '------', t_id, '----------'
                print part_names_to_filter
                print t_img_addr
                print '------------'
                print t_img.shape
                print t_parts
                print '------------'
                print t_img_part.shape
                print t_parts_part
                raise e
            new_Xtest_part[i, :] = net.blobs[feat_layer].data[0].flatten()
        return new_Xtest_part
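    # Hedged sketch, not in the original: how the nested helper above is
    # presumably invoked for the head crop. The 4096 width assumes feat_layer
    # names a fully-connected CaffeNet blob such as 'fc7', and passing 'head'
    # as the filter name is an assumption about filter_by_name's argument.
    if 'head' in parts:
        Xtest_p_h_est = compute_estimated_part_data(
            'ccpheadft-100000', (IDtest.shape[0], 4096), IDtest,
            'head', add_noise, noise_std_c, noise_std_d)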