Example #1
def visualize(data_root, result_root, scenes, daytimes, approaches,
              evaluations, weathers, visibilities, rainfall_rates):

    d = Dataset(data_root)
    e = Evaluator(data_root)

    for scene in scenes:
        if 'intermetric' in evaluations:

            depth = e.load_depth_groundtruth(scene,
                                             frame='rgb_left',
                                             gt_type='intermetric')
            depth_color = colorize_depth(depth,
                                         min_distance=e.clip_min,
                                         max_distance=e.clip_max)

            intermetric_path = os.path.join(
                result_root, 'intermetric',
                '{}_{}.jpg'.format('intermetric', scene))
            if not os.path.exists(os.path.split(intermetric_path)[0]):
                os.makedirs(os.path.split(intermetric_path)[0])
            cv2.imwrite(intermetric_path, depth_color)

            # top_view, top_view_color = e.create_top_view(e.load_depth_groundtruth, scene)
            top_view, top_view_color = e.create_top_view(scene, 'intermetric')
            intermetric_top_view_file = os.path.join(
                result_root, 'intermetric',
                '{}_{}_topview.jpg'.format('intermetric', scene))
            cv2.imwrite(intermetric_top_view_file, top_view_color)

        for daytime in daytimes:
            for weather in weathers:
                samples = []

                if weather == 'fog':
                    for visibility in visibilities:
                        samples.append(
                            d.get_fog_sequence(scene, daytime, visibility)[0])

                if weather == 'rain':
                    for rainfall_rate in rainfall_rates:
                        samples.append(
                            d.get_rain_sequence(scene, daytime,
                                                rainfall_rate)[0])

                if weather == 'clear':
                    samples.append(d.get_clear_sequence(scene, daytime)[0])

                for i, sample in enumerate(samples):
                    print(sample)

                    if 'rgb' in evaluations:
                        rgb = e.load_rgb(sample)

                        if weather == 'fog':
                            rgb_path = os.path.join(
                                result_root, 'rgb', '{}_{}_{}_{}_{}'.format(
                                    'rgb', scene, daytime, weather,
                                    visibilities[i]))
                        elif weather == 'rain':
                            rgb_path = os.path.join(
                                result_root, 'rgb', '{}_{}_{}_{}_{}'.format(
                                    'rgb', scene, daytime, weather,
                                    rainfall_rates[i]))
                        elif weather == 'clear':
                            rgb_path = os.path.join(
                                result_root, 'rgb',
                                '{}_{}_{}_{}'.format('rgb', scene, daytime,
                                                     weather))

                        if not os.path.exists(os.path.split(rgb_path)[0]):
                            os.makedirs(os.path.split(rgb_path)[0])

                        cv2.imwrite(rgb_path + '.jpg', rgb)

                        clahe = cv2.createCLAHE(clipLimit=2.0,
                                                tileGridSize=(8, 8))
                        rgb[:, :, 0] = clahe.apply(rgb[:, :, 0])
                        rgb[:, :, 1] = clahe.apply(rgb[:, :, 1])
                        rgb[:, :, 2] = clahe.apply(rgb[:, :, 2])

                        cv2.imwrite(rgb_path + '_clahe.jpg', rgb)

                    if 'lidar_raw' in evaluations:

                        depth = e.load_depth(sample,
                                             'lidar_hdl64_rgb_left',
                                             interpolate=False)
                        depth_color = colorize_pointcloud(
                            depth,
                            min_distance=e.clip_min,
                            max_distance=e.clip_max,
                            radius=5)

                        if weather == 'fog':
                            lidar_path = os.path.join(
                                result_root, 'lidar_raw',
                                '{}_{}_{}_{}_{}'.format(
                                    'lidar_raw', scene, daytime, weather,
                                    visibilities[i]))
                        elif weather == 'rain':
                            lidar_path = os.path.join(
                                result_root, 'lidar_raw',
                                '{}_{}_{}_{}_{}'.format(
                                    'lidar_raw', scene, daytime, weather,
                                    rainfall_rates[i]))
                        elif weather == 'clear':
                            lidar_path = os.path.join(
                                result_root, 'lidar_raw',
                                '{}_{}_{}_{}'.format('lidar_raw', scene,
                                                     daytime, weather))

                        if not os.path.exists(os.path.split(lidar_path)[0]):
                            os.makedirs(os.path.split(lidar_path)[0])

                        cv2.imwrite(lidar_path + '.jpg', depth_color)

                    if 'gated' in evaluations:
                        for t in [0, 17, 31]:

                            gated_img = e.load_gated(sample, t)

                            if weather == 'fog':
                                gated_path = os.path.join(
                                    result_root, 'gated{}'.format(t),
                                    '{}_{}_{}_{}_{}'.format(
                                        'gated{}'.format(t), scene, daytime,
                                        weather, visibilities[i]))
                            elif weather == 'rain':
                                gated_path = os.path.join(
                                    result_root, 'gated{}'.format(t),
                                    '{}_{}_{}_{}_{}'.format(
                                        'gated{}'.format(t), scene, daytime,
                                        weather, rainfall_rates[i]))
                            elif weather == 'clear':
                                gated_path = os.path.join(
                                    result_root, 'gated{}'.format(t),
                                    '{}_{}_{}_{}'.format(
                                        'gated{}'.format(t), scene, daytime,
                                        weather))

                            if not os.path.exists(
                                    os.path.split(gated_path)[0]):
                                os.makedirs(os.path.split(gated_path)[0])

                            cv2.imwrite(gated_path + '.jpg', gated_img)

                            clahe = cv2.createCLAHE(clipLimit=2.0,
                                                    tileGridSize=(8, 8))
                            gated_img[:, :, 0] = clahe.apply(gated_img[:, :,
                                                                       0])
                            gated_img[:, :, 1] = clahe.apply(gated_img[:, :,
                                                                       1])
                            gated_img[:, :, 2] = clahe.apply(gated_img[:, :,
                                                                       2])

                            cv2.imwrite(gated_path + '_clahe.jpg', gated_img)

                    for approach in approaches:
                        if weather == 'fog':
                            sample_path = os.path.join(
                                result_root, approach, '{}_{}_{}_{}_{}'.format(
                                    approach, scene, daytime, weather,
                                    visibilities[i]))
                        elif weather == 'rain':
                            sample_path = os.path.join(
                                result_root, approach, '{}_{}_{}_{}_{}'.format(
                                    approach, scene, daytime, weather,
                                    rainfall_rates[i]))
                        elif weather == 'clear':
                            sample_path = os.path.join(
                                result_root, approach,
                                '{}_{}_{}_{}'.format(approach, scene, daytime,
                                                     weather))

                        if not os.path.exists(os.path.split(sample_path)[0]):
                            os.makedirs(os.path.split(sample_path)[0])

                        if 'depth_map' in evaluations:
                            depth = e.load_depth(sample, approach)

                            depth_color = colorize_depth(
                                depth,
                                min_distance=e.clip_min,
                                max_distance=e.clip_max)
                            depth_map_path = sample_path + '_depth_map.jpg'
                            cv2.imwrite(depth_map_path, depth_color)

                        if 'error_image' in evaluations:
                            error_image = e.error_image(sample,
                                                        approach,
                                                        gt_type='intermetric')
                            error_image_path = sample_path + '_error_image.jpg'
                            cv2.imwrite(error_image_path, error_image)

                        if 'top_view' in evaluations:
                            top_view, top_view_color = e.create_top_view(
                                sample, approach)
                            top_view_path = sample_path + '_top_view.jpg'
                            cv2.imwrite(top_view_path, top_view_color)
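The repeated "split the path, check for the directory, call makedirs" blocks above can be collapsed into one small helper; a minimal sketch (ensure_parent_dir is a hypothetical name, not part of the original script):

import os


def ensure_parent_dir(file_path):
    """Create the directory that will contain file_path, if it does not exist yet."""
    parent = os.path.dirname(file_path)
    if parent:
        os.makedirs(parent, exist_ok=True)

# e.g. ensure_parent_dir(rgb_path + '.jpg') before each cv2.imwrite(...) call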
Example #2
    def trainModel(self,
                   allTrainData,
                   searchRange=[1, 300],
                   budget=25000 * 1000):
        """
        Train the constant model.

        A best bid price will be returned

        Budget should be in chinese fen * 1000

        :param allTrainData: Training data in matrix
        :param searchRange: Search Grid in array for best bid price. [lowerbound, upperbound]
        :param budget: The budget to use. chinese fen * 1000
        :return: The best constant bid price that obtained the highest CTR

        """
        # goldlabel = np.copy(allTrainData)
        # goldlabel = np.delete(goldlabel, [0, 21, 22], axis=1)# remove 'click','bidprice','payprice'

        bestBid = 0
        bestCTR = 0
        bestClick = 0
        # print(goldlabel.shape)
        for bid in range(searchRange[0], searchRange[1]):
            # for bid in range(1000, 1001):  # To test cutting back budget
            self.defaultBid = bid
            # start_time = time.time()
            bids = self.getBidPrice(allTrainData.bidid)
            # print('Metrics np.apply_along_axis time: {} seconds'.format(round(time.time() - start_time, 2)))
            # myEvaluator = Evaluator(budget, bids, allTrainData)
            myEvaluator = Evaluator()
            resultDict = myEvaluator.computePerformanceMetricsDF(
                budget, bids, allTrainData)

            # if resultDict['won'] != 0:
            #     print("Constant bid: {} CTR: {}".format(self.defaultBid, resultDict['click'] / resultDict['won']))
            # else:
            #     print("Constant bid: {} CTR: not computed as no. of won is 0".format(self.defaultBid))

            # if resultDict['won'] != 0:
            #     currentCTR = resultDict['click'] / resultDict['won']
            # else:
            #     continue

            # if currentCTR > bestCTR:
            #     bestCTR = currentCTR
            #     bestBid = bid

            if resultDict['won'] != 0:
                print("Constant bid: {} Clicks: {}".format(
                    self.defaultBid, resultDict['click']))
                print("bestBid: ", bestBid)
                print("bestClick: ", bestClick)
            else:
                print("Constant bid: {} CTR: not computed as no. of won is 0".
                      format(self.defaultBid))

            if resultDict['won'] != 0:
                currentClick = resultDict['click']
            else:
                continue

            if currentClick > bestClick:
                bestClick = currentClick
                bestBid = bid

        # print("bestBid: ", bestBid)
        # print("bestCTR: ", bestCTR)
        print("bestBid: ", bestBid)
        print("bestClick: ", bestClick)

        self.defaultBid = bestBid

        return self.defaultBid
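A standalone sketch of the same grid search, with a toy evaluate() standing in for Evaluator.computePerformanceMetricsDF (the bid and click numbers below are made up, not taken from any dataset):

def best_constant_bid(evaluate, search_range=(1, 300)):
    best_bid, best_clicks = 0, 0
    for bid in range(search_range[0], search_range[1]):
        result = evaluate(bid)  # expected to return a dict with 'won' and 'click' counts
        if result['won'] == 0:
            continue
        if result['click'] > best_clicks:
            best_clicks, best_bid = result['click'], bid
    return best_bid


# toy evaluator: no wins below a bid of 10, clicks peak at a bid of 75
print(best_constant_bid(lambda bid: {'won': int(bid >= 10), 'click': max(0, 100 - abs(bid - 75))}))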
Example #3
def evaluate_agent(args, shared_value, share_net):

    evaluator = Evaluator(args, shared_value, share_net)
    evaluator.run()
Example #4
    def findAnomalies(self, saveChart=False, saveEvaluation=False):
        outliers_fraction = 0.15
        clf = EllipticEnvelope(contamination=outliers_fraction)
        predicted_outlier = []
        list_of_df = self.dataCollector.getWithAnomaly()
        for df in list_of_df:
            if df.shape[0] > 0:
                data = df.drop(['anomaly', 'changepoint'], axis=1)
                self.st_tr_time.append(datetime.datetime.now().timestamp())
                # EllipticEnvelope.fit_predict returns +1 for inliers and -1 for outliers;
                # multiplying by -1 turns outliers into +1, the rolling median smooths
                # isolated spikes, and the remaining -1 (inlier) values are mapped to 0.
                prediction = pd.Series(clf.fit_predict(data) * -1, index=df.index) \
                    .rolling(5) \
                    .median() \
                    .fillna(0).replace(-1, 0)
                self.en_tr_time.append(datetime.datetime.now().timestamp())
                # predicted outliers saving
                predicted_outlier.append(prediction)
                df['rocov_anomaly'] = prediction
        true_outlier = [df.anomaly for df in list_of_df]
        if saveChart:
            for i in range(len(predicted_outlier)):
                plt.figure()

                plt.rcParams["font.family"] = "Times New Roman"
                csfont = {'fontname': 'Times New Roman'}
                plt.xlabel('Time', **csfont)
                plt.ylabel('Value', **csfont)
                plt.title('Robust covariance On File [{}]'.format(i + 1),
                          **csfont)

                predicted_outlier[i].plot(figsize=(12, 6),
                                          label='predictions',
                                          marker='o',
                                          markersize=5)
                true_outlier[i].plot(marker='o', markersize=2)

                # data = list_of_df[i]
                # plt.scatter(x=data[data['rocov_anomaly'] == data['anomaly']].index,
                #             y=data[data['rocov_anomaly'] == data['anomaly']]['anomaly'], label='True Prediction'
                #             , c='g', zorder=4)
                # plt.scatter(x=data[data['rocov_anomaly'] != data['anomaly']].index,
                #             y=data[data['rocov_anomaly'] != data['anomaly']]['anomaly'], label='False Prediction'
                #             , c='r', zorder=5)
                plt.legend(loc='upper right')
                plt.savefig(self.path_to_plt +
                            'anom/rocov-pre-{}.png'.format(i + 1),
                            format='png')
                print('Chart {} is Generated'.format(i + 1))
                plt.clf()
                plt.close('all')
        if saveChart:
            ts = 1
            for df in list_of_df:
                data = df.drop(['anomaly', 'changepoint'], axis=1)
                pc = PCA(n_components=2).fit_transform(data)
                df[['X', 'Y']] = pc
                plt.figure()
                sb.set(font='Times New Roman')
                sns = sb.scatterplot(data=df,
                                     x='X',
                                     y='Y',
                                     hue='rocov_anomaly',
                                     palette='bright')
                sns.set_title(
                    'The Anomaly Detected By Robust covariance, File {}'.
                    format(ts))
                sns.figure.savefig(self.path_to_plt +
                                   'chart/chart-{}.png'.format(ts))
                plt.close('all')
                print('The Chart of  File {} is Generated.'.format(ts))
                ts += 1
        if saveEvaluation:
            evaluator = Evaluator(true_outlier,
                                  predicted_outlier,
                                  metric='binary',
                                  numenta_time='30 sec')
            metrics = evaluator.getConfusionMetrics()
            TP = metrics['TP']
            TN = metrics['TN']
            FP = metrics['FP']
            FN = metrics['FN']
            print('\n-----------------------------------------------------')
            print('Robust covariance Outputs: ')
            print(f'\t False Alarm Rate: {round(FP / (FP + TN) * 100, 2)} %')
            print(f'\t Missing Alarm Rate: {round(FN / (FN + TP) * 100, 2)} %')
            print(
                f'\t Accuracy Rate: {round((TP + TN) / (TP + TN + FN + FP) * 100, 2)} %'
            )

            trainTime = np.array(self.en_tr_time).sum() - np.array(
                self.st_tr_time).sum()
            print(f'\t Train & Train Time {round(trainTime, 2)}s')

            data = {
                'far': round(FP / (FP + TN) * 100, 2),
                'mar': round(FN / (FN + TP) * 100, 2),
                'acc': round((TP + TN) / (TP + TN + FN + FP) * 100, 2),
                'tr': trainTime,
                'te': 0,
                'tp': TP,
                'tn': TN,
                'fp': FP,
                'fn': FN
            }
            output = OutputWriter(self.path_to_plt, 'RobustCov', data)
            output.write()
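A minimal, self-contained sketch of the fit_predict / rolling-median chain used above, on synthetic data (the sample values and contamination level are made up):

import numpy as np
import pandas as pd
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
values = pd.DataFrame({'value': np.r_[rng.normal(0, 1, 200), rng.normal(8, 1, 10)]})

clf = EllipticEnvelope(contamination=0.05)
raw = clf.fit_predict(values)                       # +1 for inliers, -1 for outliers
flags = (pd.Series(raw * -1, index=values.index)
         .rolling(5).median()
         .fillna(0)
         .replace(-1, 0))                           # 1 marks an anomaly, 0 marks normal data
print(int(flags.sum()), 'points flagged as anomalous')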
Example #5
def hybrid_repo(is_test):
    b = Builder()
    ev = Evaluator()
    ev.split()
    ICM = b.build_ICM()

    URM_train, URM_test = train_test_holdout(b.get_URM(), train_perc=0.8)
    URM_train, URM_validation = train_test_holdout(URM_train, train_perc=0.9)

    from ParameterTuning.AbstractClassSearch import EvaluatorWrapper
    from Base.Evaluation.Evaluator import SequentialEvaluator

    evaluator_validation = SequentialEvaluator(URM_validation, cutoff_list=[5])
    evaluator_test = SequentialEvaluator(URM_test, cutoff_list=[5, 10])

    evaluator_validation = EvaluatorWrapper(evaluator_validation)
    evaluator_test = EvaluatorWrapper(evaluator_test)

    from KNN.ItemKNNCFRecommender import ItemKNNCFRecommender
    from ParameterTuning.BayesianSearch import BayesianSearch

    recommender_class = ItemKNNCFRecommender

    parameterSearch = BayesianSearch(recommender_class,
                                     evaluator_validation=evaluator_validation,
                                     evaluator_test=evaluator_test)

    from ParameterTuning.AbstractClassSearch import DictionaryKeys

    hyperparamethers_range_dictionary = {}
    hyperparamethers_range_dictionary["topK"] = [
        5, 10, 20, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800
    ]
    hyperparamethers_range_dictionary["shrink"] = [
        0, 10, 50, 100, 200, 300, 500, 1000
    ]
    hyperparamethers_range_dictionary["similarity"] = ["cosine"]
    hyperparamethers_range_dictionary["normalize"] = [True, False]

    recommenderDictionary = {
        DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
        DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
        DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
        DictionaryKeys.FIT_KEYWORD_ARGS: dict(),
        DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
        hyperparamethers_range_dictionary
    }

    output_root_path = "result_experiments/"

    import os

    # If directory does not exist, create
    if not os.path.exists(output_root_path):
        os.makedirs(output_root_path)

    output_root_path += recommender_class.RECOMMENDER_NAME

    n_cases = 2
    metric_to_optimize = "MAP"

    best_parameters = parameterSearch.search(recommenderDictionary,
                                             n_cases=n_cases,
                                             output_root_path=output_root_path,
                                             metric=metric_to_optimize)

    itemKNNCF = ItemKNNCFRecommender(URM_train)
    itemKNNCF.fit(**best_parameters)

    from FW_Similarity.CFW_D_Similarity_Linalg import CFW_D_Similarity_Linalg

    n_cases = 2
    metric_to_optimize = "MAP"

    best_parameters_ItemKNNCBF = parameterSearch.search(
        recommenderDictionary,
        n_cases=n_cases,
        output_root_path=output_root_path,
        metric=metric_to_optimize)

    itemKNNCBF = ItemKNNCBFRecommender(ICM, URM_train)
    itemKNNCBF.fit(**best_parameters_ItemKNNCBF)
    """
    #_____________________________________________________________________
    from ParameterTuning.BayesianSearch import BayesianSearch
    from ParameterTuning.AbstractClassSearch import DictionaryKeys

    from ParameterTuning.AbstractClassSearch import EvaluatorWrapper

    evaluator_validation_tuning = EvaluatorWrapper(evaluator_validation)
    evaluator_test_tuning = EvaluatorWrapper(evaluator_test)

    recommender_class = CFW_D_Similarity_Linalg

    parameterSearch = BayesianSearch(recommender_class,
                                     evaluator_validation=evaluator_validation_tuning,
                                     evaluator_test=evaluator_test_tuning)

    hyperparamethers_range_dictionary = {}
    hyperparamethers_range_dictionary["topK"] = [5, 10, 20, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800]
    hyperparamethers_range_dictionary["add_zeros_quota"] = range(0, 1)
    hyperparamethers_range_dictionary["normalize_similarity"] = [True, False]

    recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train, ICM, itemKNNCF.W_sparse],
                             DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                             DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
                             DictionaryKeys.FIT_KEYWORD_ARGS: dict(),
                             DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary}

    output_root_path = "result_experiments/"

    import os

    # If directory does not exist, create
    if not os.path.exists(output_root_path):
        os.makedirs(output_root_path)

    output_root_path += recommender_class.RECOMMENDER_NAME

    n_cases = 2
    metric_to_optimize = "MAP"

    best_parameters_CFW_D = parameterSearch.search(recommenderDictionary,
                                                   n_cases=n_cases,
                                                   output_root_path=output_root_path,
                                                   metric=metric_to_optimize)

    CFW_weithing = CFW_D_Similarity_Linalg(URM_train, ICM, itemKNNCF.W_sparse)
    CFW_weithing.fit(**best_parameters_CFW_D)
    #___________________________________________________________________________________________-

    """

    from GraphBased.P3alphaRecommender import P3alphaRecommender

    P3alpha = P3alphaRecommender(URM_train)
    P3alpha.fit()

    from MatrixFactorization.PureSVD import PureSVDRecommender

    #pureSVD = PureSVDRecommender(URM_train)
    #pureSVD.fit()

    rec = HybridRec.HybridRec()

    S_UCM = b.get_S_UCM_KNN(b.get_UCM(ev.get_URM_train()), 600)
    S_ICM = b.build_S_ICM_knn(b.build_ICM(), 250)

    rec.fit(ev.get_URM_train(),
            ev.get_target_playlists(),
            ev.get_target_tracks(),
            ev.num_playlists_to_test,
            itemKNNCBF.W_sparse,
            itemKNNCF.W_sparse,
            P3alpha.W_sparse,
            is_test=True,
            alfa=0.7,
            avg=0.3)

    train_df = rec.recommend()

    if is_test:
        map5 = ev.map5(train_df)
        print('Hybrid MAP@5:', map5)
        return map5
    else:
        print('Prediction saved!')
        train_df.to_csv(os.path.dirname(os.path.realpath(__file__))[:-19] +
                        "/all/sub.csv",
                        sep=',',
                        index=False)
        return 0

    #hybridrecommender = ItemKNNSimilarityHybridRecommender(URM_train, itemKNNCF.W_sparse, P3alpha.W_sparse)
    #hybridrecommender.fit(alpha=0.5)

    #print(evaluator_validation.evaluateRecommender(hybridrecommender))
    """
Example #6
model_path = sys.argv[3]
mean_train_path = os.path.join(os.sep.join(model_path.split(os.sep)[:-2]),
                               "document_mean_train.npy")
bad_sample_saving_path = sys.argv[6] if len(sys.argv) > 6 else None
test_postproc = int(sys.argv[5]) > 0 if len(sys.argv) > 5 else False

if bad_sample_saving_path is not None:
    if not os.path.isdir(bad_sample_saving_path):
        os.mkdir(bad_sample_saving_path)

if test_postproc:
    from net_postproc import Net

model = Net(model_path, mean_train_path)

evaluator = Evaluator(labels_file_path)

image_names = os.listdir(test_path)
total_l1_loss = 0
total_l2_loss = 0
total_overlap = 0
total_overlap_score = 0
total_time = 0
num_of_images = 0

for fn in image_names:

    if not (fn.endswith("jpg") or fn.endswith("JPG")):
        continue

    print(fn)
Example #7
def evaluator(name=None):
    from Evaluator import Evaluator
    return Evaluator(name)
Example #8
def runSimulation(simType, defaultNet, wdir, odir, images, xsd, net, mesh,
                  xmlout, bound, netSchema, boundSchema, template, parameters,
                  diameters, days, xmlSol, xmlMesh, writeCsv, plotImages,
                  plotPressure, plotFlow, plotWss, plotReynolds, writePressure,
                  writeFlow, writeWss, writeReynolds, velocityProfile, results,
                  excludeWss, export, automaticResults, inputGnuid):
    '''Welcome and instructions messages.'''

    print "##########################################"
    print "############ Welcome to pyNS #############"
    print "## ./pyNS -h or --help for instructions ##"
    print "##########################################\n"
    '''Exporting results into txt files'''
    if export is not False:
        if not os.path.exists('Results/%s/exportedSolutions' % export):
            os.mkdir('Results/%s/exportedSolutions' % export)
        for f in mylistdir('Results/%s/json' % export):
            if f == 'info.json':
                pass
            else:
                print "exporting Results/%s/json/" % export + f
                exporting('Results/%s/json/' % export + f)
                new_file = f.split('.')[0] + '.txt'
                shutil.move(
                    'Results/%s/json/' % export + new_file,
                    'Results/%s/exportedSolutions/' % export + new_file)
        sys.exit(
            'All %s solutions exported successfully in Results/%s/exportedSolutions/ folder'
            % (export, export))

    if not results:
        if defaultNet is True:
            simType = 'specific'
            net = 'vascular_network_arterial_right_arm.xml'
            bound = 'boundary_conditions_arterial_right_arm.xml'
        elif template == 'willis':
            simType = 'specific'
            wdir = 'XML/Models/WillisCircle'
            net = 'vascular_network_willis.xml'
            bound = 'boundary_conditions_willis.xml'
        elif simType == 'specific':
            if net is None and bound is not None:
                sys.exit(
                    "Please provide a network graph XML input file or choose a generic simulation type."
                )
            elif net is not None and bound is None:
                sys.exit(
                    "Please provide a boundary conditions XML input file or choose a generic simulation type."
                )
            elif net is None and bound is None:
                sys.exit(
                    "Please provide either a network graph XML input file and a boundary conditions XML input file or choose a generic simulation type."
                )
    '''Checking matplotlib module for optional plotting methods.'''
    if plotImages or plotFlow or plotPressure or plotWss or plotReynolds or velocityProfile is True:
        try:
            import matplotlib
        except ImportError:
            sys.exit(
                'Matplotlib package is required for plotting solutions in .png files or computing velocityProfile videos.\nPlease download matplotlib from matplotlib.sourceforge.net.'
            )
    '''Loading previous specific results.'''
    if results is not False:
        while True:
            print "Starting webServer for post-processing results. Close it with CTRL-C."
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            try:
                port = 8000
                httpd = SocketServer.TCPServer(("localhost", port), Handler)
            except:
                try:
                    pid = None
                    for line in os.popen("lsof -i:8000"):
                        fields = line.split()
                        pid = fields[1]
                    if pid:
                        os.system("kill %s" % pid)
                        time.sleep(5)
                    httpd = SocketServer.TCPServer(("localhost", port),
                                                   Handler)
                except:
                    connected = False
                    startPort = 8000
                    while not connected:
                        try:
                            httpd = SocketServer.TCPServer(
                                ("localhost", startPort), Handler)
                            connected = True
                            port = startPort
                        except:
                            startPort += 1

            if results == 'last':
                ip = "http://localhost:%s" % port
                webbrowser.open_new_tab(ip + '/Results/results.html')
            else:
                if os.path.exists('Results/' + results):
                    ip = "http://localhost:%s" % port
                    webbrowser.open_new_tab(ip + "/Results/" + results +
                                            "/results.html")
                else:
                    sys.exit('Error: ' + results +
                             ' directory does not exist.')
            httpd.serve_forever()
    '''Checking for webserver instance'''
    if automaticResults:
        try:
            ip = "http://localhost:8000"
            pid = None
            for line in os.popen("lsof -i:8000"):
                fields = line.split()
                pid = fields[1]
            if pid:
                os.system("kill %s" % pid)
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            httpd = SocketServer.TCPServer(("localhost", 8000), Handler)
        except:
            connected = False
            startPort = 8000
            while not connected:
                try:
                    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
                    httpd = SocketServer.TCPServer(("localhost", startPort),
                                                   Handler)
                    connected = True
                    port = startPort
                    ip = "http://localhost:%s" % port
                except:
                    startPort += 1
    '''SIMULATION'''
    '''Create XML and image directories'''
    if not os.path.exists(wdir):
        os.mkdir(wdir)
    if not os.path.exists(xsd):
        os.mkdir(xsd)
    '''If needed, creating output directory(s).'''
    if xmlSol is True or xmlMesh is True or writeFlow is True or writePressure is True or writeWss is True or writeReynolds is True:
        if not os.path.exists(odir):
            os.mkdir(odir)
    if writeFlow is True:
        ofdir = os.path.join(odir, 'Flow/')
        if not os.path.exists(ofdir):
            os.mkdir(ofdir)
    if writePressure is True:
        opdir = os.path.join(odir, 'Pressure/')
        if not os.path.exists(opdir):
            os.mkdir(opdir)
    if writeWss is True:
        owdir = os.path.join(odir, 'Wss/')
        if not os.path.exists(owdir):
            os.mkdir(owdir)
    if writeReynolds is True:
        oodir = os.path.join(odir, 'Other/')
        if not os.path.exists(oodir):
            os.mkdir(oodir)
    '''If needed, creating images directory.'''
    if plotImages is True:
        f_images = os.path.join(images, 'Flow/')
        p_images = os.path.join(images, 'Pressure/')
        w_images = os.path.join(images, 'Wss/')
        o_images = os.path.join(images, 'Other/')
        if not os.path.exists(images):
            os.mkdir(images)
            os.mkdir(f_images)
            os.mkdir(p_images)
            os.mkdir(w_images)
            os.mkdir(o_images)
    '''Setting variables.'''
    testTube = 'XML/TEST/CircularStraightTube/'
    netTube = 'vascular_network_v3.0_TUBE.xml'
    boundTube = 'boundary_conditions_v2.0_TUBE.xml'
    testTape = 'XML/TEST/CircularTaperedTube/'
    netTape = 'vascular_network_v3.0_TAPE.xml'
    boundTape = 'boundary_conditions_v2.0_TAPE.xml'
    testSimple = 'XML/TEST/SimpleNetwork/'
    netSimple = 'vascular_network_simple.xml'
    boundSimple = 'boundary_conditions_simple.xml'
    testing = 'XML/TEST/Testing/'
    testingNetwork = 'vascular_network_test.xml'
    testingBoundary = 'boundary_conditions_test.xml'

    if simType == 'specific':
        xmlnetpath = os.path.join(wdir, net)
        xmlboundpath = os.path.join(wdir, bound)
        preRun = True
    if simType == 'tube':
        xmlnetpath = os.path.join(testTube, netTube)
        xmlboundpath = os.path.join(testTube, boundTube)
        preRun = False
    if simType == 'tape':
        xmlnetpath = os.path.join(testTape, netTape)
        xmlboundpath = os.path.join(testTape, boundTape)
        preRun = False
    if simType == 'simple':
        xmlnetpath = os.path.join(testSimple, netSimple)
        xmlboundpath = os.path.join(testSimple, boundSimple)
        preRun = False
    if simType == 'testing':
        xmlnetpath = os.path.join(testing, testingNetwork)
        xmlboundpath = os.path.join(testing, testingBoundary)
        preRun = False

    xmlmeshpath = os.path.join(wdir, mesh)
    xmloutpath = os.path.join(odir, xmlout)
    xsdnetpath = os.path.join(xsd, netSchema)
    xsdboundpath = os.path.join(xsd, boundSchema)
    '''Setting adaptation and simulation days'''
    adaptation = Adaptation()
    daysList = map(int, list(linspace(-1, days, days + 2)))
    if excludeWss is True and days > 0:
        sys.exit(
            "Error: You can't exclude Wss computing for adaptation algorithm")
    '''Setting Simulation Context Parameters for Simulation'''
    simulationContext = SimulationContext()
    evaluator = Evaluator()
    evaluator.SetSimulationContext(simulationContext)
    simulationContext.SetEvaluator(evaluator)

    for day in daysList:
        if day <= 0:
            '''Parameters Model Adaptor'''
            if simType == 'generic':
                modelAdaptor = ModelAdaptor()
                modelAdaptor.SetSimulationContext(simulationContext)
                modelAdaptor.SetEvaluator(evaluator)
                modelAdaptor.ChoosingTemplate(parameters)
                if template == 'arm':
                    if day == -1:
                        modelAdaptor.ftype = 7
                    if modelAdaptor.arm == 0:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Left_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Left_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Left_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Left_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Left_Arm/PRE'
                            preRun = False
                    if modelAdaptor.arm == 1:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Right_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Right_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Right_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Right_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Right_Arm/PRE'
                            preRun = False

                netPostGeneric = 'vascular_network.xml'
                boundPostGeneric = 'boundary_conditions.xml'
                netPost = modelAdaptor.Idpat + '_vascular_network.xml'
                boundPost = modelAdaptor.Idpat + '_boundary_conditions.xml'
                xmlnetpathGeneric = os.path.join(wdir, netPostGeneric)
                xmlboundpathGeneric = os.path.join(wdir, boundPostGeneric)
                xmlnetpath = os.path.join(wdir, netPost)
                xmlboundpath = os.path.join(wdir, boundPost)
                simulationContext.ReadFromXML(xmlboundpathGeneric,
                                              xsdboundpath)
            else:
                simulationContext.ReadFromXML(xmlboundpath, xsdboundpath)

            if simType == 'generic':
                modelAdaptor.SettingParameters(parameters)
                modelAdaptor.AdaptingParameters(xmlboundpathGeneric,
                                                xmlboundpath)
            '''Creating NetworkGraph Object From its XML'''
            networkGraph = NetworkGraph()
            if simType == 'generic':
                networkGraph.ReadFromXML(xmlnetpathGeneric, xsdnetpath)
            else:
                networkGraph.ReadFromXML(xmlnetpath, xsdnetpath)
            '''NetworkGraph Model Adaptor'''
            if simType == 'generic':
                modelAdaptor.SetNetworkGraph(networkGraph)
                evaluator.SetNetworkGraph(networkGraph)
                if diameters is False:
                    csvfilepath = modelAdaptor.AdaptingModel(
                        xmlnetpathGeneric, xmlnetpath)
                else:
                    csvfilepath = modelAdaptor.AdaptingModel(
                        xmlnetpathGeneric, xmlnetpath, diameters)
            '''Setting results directory based on PatientID in networkGraph XML file'''

            if plotImages is False:
                try:
                    shutil.rmtree('Results/json')
                except:
                    pass
                try:
                    os.mkdir('Results/json')
                except:
                    pass
                if simType == 'generic':
                    idPat = modelAdaptor.Idpat
                elif template == 'willis':
                    idPat = template
                else:
                    idPat = simType
                if os.path.exists('Results/%s' % idPat):
                    pass
                else:
                    os.mkdir('Results/%s' % idPat)
                    os.mkdir('Results/%s/json' % idPat)
                    shutil.copytree('Results/css', 'Results/%s/css' % idPat)
                    shutil.copytree('Results/js', 'Results/%s/js' % idPat)
                    shutil.copy('Results/results.html',
                                'Results/%s/results.html' % idPat)
            '''Mesh generation, XML Network Graph is needed for creating XML Network Mesh.'''
            meshGenerator = MeshGenerator()
            meshGenerator.SetNetworkGraph(networkGraph)
            networkMesh = NetworkMesh()
            meshGenerator.SetNetworkMesh(networkMesh)
            meshGenerator.SetMaxLength(5.0e-2)
            meshGenerator.GenerateMesh()
        '''Setting Boundary Conditions Mesh input and reading XML Boundary Conditions File'''
        boundaryConditions = BoundaryConditions()
        boundaryConditions.SetSimulationContext(simulationContext)
        boundaryConditions.SetNetworkMesh(networkMesh)
        boundaryConditions.ReadFromXML(xmlboundpath, xsdboundpath)
        boundaryConditions.SetSpecificCardiacOutput()
        '''In case of a generic simulation, patient-specific generated files will be moved to Results folder.'''
        if simType == 'generic' and day < 0:
            shutil.move(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_pre_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_pre_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_pre_patient_specific.csv' %
                         (idPat, idPat)))
        if simType == 'generic' and day == 0:
            shutil.copy(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_post_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.copy(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_post_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.copy(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_post_patient_specific.csv' %
                         (idPat, idPat)))
        if simType == 'generic' and day > 0 and day == days:
            shutil.move(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_adapted_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_adapted_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_adapted_patient_specific.csv' %
                         (idPat, idPat)))
        '''Setting Evaluator'''
        evaluator.SetNetworkGraph(networkGraph)
        evaluator.SetNetworkMesh(networkMesh)
        '''Adaptation Model'''
        adaptation.SetBoundaryConditions(boundaryConditions)
        adaptation.SetSimulationContext(simulationContext)
        preRun = adaptation.Adapt(day)
        if len(daysList) == 1:
            pass
        else:
            print "Day %d " % (day * 10)  #1 step represent 10 days
        ''' Setting Solver Class'''
        solver = SolverFirstTrapezoid()
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator)
        '''Pre-run'''
        if preRun is True:
            solver.SetSteadyFlow()
            print "Steady Pre-Run, setting non-linear parameters"
            solver.Solve()
            parametersToLinear = ["Radius", "Compliance"]
            for el in networkMesh.Elements:
                el.SetLinearValues(parametersToLinear)
            networkMesh.checkLinearConsistence()
        '''Run'''
        evaluator.ExpressionCache = {}
        solver = SolverFirstTrapezoid()
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator)
        solver.SetPulseFlow()
        print "Solving system"
        solver.Solve()
        '''Post Processing: Setting Solutions input and plotting some information and/or writing solutions to XML Solutions File'''
        '''User can choose two different post processing strategies. Saving images using matplotlib or visualize results in its browser'''
        '''If needed, pyNS writes xml mesh file'''
        if xmlMesh is True:
            meshdirpath = os.path.join(odir, str(day))
            if not os.path.exists(meshdirpath):
                os.mkdir(meshdirpath)
            xmlmeshpath = os.path.join(meshdirpath, mesh)
            outdirpath = os.path.join(odir, str(day))
            if not os.path.exists(outdirpath):
                os.mkdir(outdirpath)
            xmloutpath = os.path.join(outdirpath, xmlout)
            networkMesh.WriteToXML(xmlmeshpath)
        '''Setting NetworkSolutions'''
        print "->100%, Running post-processing"
        networkSolutions = NetworkSolutions()
        networkSolutions.SetNetworkMesh(networkMesh)
        networkSolutions.SetNetworkGraph(networkGraph)
        networkSolutions.SetSimulationContext(simulationContext)
        networkSolutions.SetSolutions(solver.Solutions)
        networkSolutions.WriteJsonInfo(days, networkMesh.Elements, idPat)
        adaptation.SetSolutions(day, networkSolutions)
        adaptation.SetRefValues(day, networkMesh)
        '''If needed, pyNS creates images subdirectory(s) for each adaptation step.'''
        if plotImages is True:
            daystr = str(day) + '/'
            f_dayImages = os.path.join(f_images, daystr)
            p_dayImages = os.path.join(p_images, daystr)
            w_dayImages = os.path.join(w_images, daystr)
            o_dayImages = os.path.join(o_images, daystr)
            if not os.path.exists(images):
                os.mkdir(images)
            if not os.path.exists(f_dayImages):
                os.mkdir(f_dayImages)
            if not os.path.exists(p_dayImages):
                os.mkdir(p_dayImages)
            if not os.path.exists(w_dayImages):
                os.mkdir(w_dayImages)
            if not os.path.exists(o_dayImages):
                os.mkdir(o_dayImages)
            networkSolutions.SetImagesPath({
                'im': images,
                'f': f_dayImages,
                'p': p_dayImages,
                'w': w_dayImages,
                'o': o_dayImages
            })
        '''If needed, pyNS creates output subdirectory(s) for each adaptation step.'''
        if writeFlow is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            f_dayOutput = os.path.join(ofdir, daystr)
            if not os.path.exists(f_dayOutput):
                os.mkdir(f_dayOutput)
        if writePressure is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            p_dayOutput = os.path.join(opdir, daystr)
            if not os.path.exists(p_dayOutput):
                os.mkdir(p_dayOutput)
        if writeWss is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            w_dayOutput = os.path.join(owdir, daystr)
            if not os.path.exists(w_dayOutput):
                os.mkdir(w_dayOutput)
        if writeReynolds is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            o_dayOutput = os.path.join(oodir, daystr)
            if not os.path.exists(o_dayOutput):
                os.mkdir(o_dayOutput)
        '''If needed, pyNS writes xml Solution file.'''
        if xmlSol is True:
            networkSolutions.WriteToXML(xmloutpath)
        '''Post process solution for each element of the network'''
        for element in networkMesh.Elements:
            if element.Type == 'WavePropagation' or element.Type == 'Resistance':
                networkSolutions.WriteJson(element.Id, day, excludeWss, idPat)
                if velocityProfile is True:
                    networkSolutions.SaveVelocityProfile(element, str(day))
                if plotFlow is True:
                    networkSolutions.PlotFlow(element.Id)
                if plotPressure is True:
                    networkSolutions.PlotPressure(element.Id)
                if plotWss is True:
                    networkSolutions.PlotWSS(element)
                if plotReynolds is True:
                    networkSolutions.PlotReynolds(element.Id)
                if writeFlow is True:
                    networkSolutions.WriteFlowOutput(
                        element.Id,
                        f_dayOutput + 'Flow_' + element.Name + '.txt')
                if writePressure is True:
                    networkSolutions.WritePressureInput(
                        element.Id,
                        p_dayOutput + '/p_in_' + element.Name + '.txt')
                    networkSolutions.WritePressureOutput(
                        element.Id,
                        p_dayOutput + '/p_out_' + element.Name + '.txt')
                    networkSolutions.WritePressureDrop(
                        element.Id,
                        p_dayOutput + '/p_drop_' + element.Name + '.txt')
                if writeWss is True:
                    networkSolutions.WriteWSSOutput(
                        element.Id,
                        w_dayOutput + 'WSS_' + element.Name + '.txt')
                if writeReynolds is True:
                    networkSolutions.WriteReynolds(
                        element.Id,
                        o_dayOutput + 'Reynolds' + element.Name + '.txt')
    '''Adaptation data'''
    if days > 0:
        networkSolutions.WriteJsonAdapt(adaptation, idPat)
        if writeCsv is True:
            networkSolutions.WriteToCsv(adaptation, 'Diameter')
            networkSolutions.WriteToCsv(adaptation, 'Pressure')
            networkSolutions.WriteToCsv(adaptation, 'Flow')
            networkSolutions.WriteToCsv(adaptation, 'Wss')
    '''Export GNUID'''
    if inputGnuid:
        networkSolutions.GetGnuidInformation(idPat, inputGnuid)

    print "\nJOB FINISHED"
    if automaticResults:
        try:
            shutil.copytree('Results/%s/json' % idPat,
                            'Results/json',
                            symlinks=True)
        except OSError:
            shutil.rmtree('Results/json')
            shutil.copytree('Results/%s/json' % idPat,
                            'Results/json',
                            symlinks=True)
        print "Starting webServer for post-processing results. Close it with CTRL-C."
        webbrowser.open_new_tab(ip + '/Results/results.html')
        httpd.serve_forever()
Example #9
from End import EndSuccess
from End import EndFail

import math
import rospy

CONFIDENCE_THRESHOLD = 20  # number of tennis ball pixels
DIST_THRESHOLD = 5
MAX_SPEED_AT_DIST = 10  # distance at which rover should be traveling at max speed
MAX_SPEED_AT_ANGLE = math.pi / 2  # angular distance at which rover should be turning at max speed
MIN_DRIVE_SPEED = 150
MIN_TURNING_SPEED = 180

seek_states = {
    "evaluator":
    Evaluator(CONFIDENCE_THRESHOLD, DIST_THRESHOLD, goalTracker),
    "seeker":
    Seeker(CONFIDENCE_THRESHOLD, MAX_SPEED_AT_DIST, MAX_SPEED_AT_ANGLE,
           MIN_DRIVE_SPEED, MIN_TURNING_SPEED),
    "failure":
    EndFail(),
    "success":
    EndSuccess(),
}

seek_transitions = {
    "evaluator:far": "failure",
    "evaluator:close": "seeker",
    "evaluator:lost": "success",
    "seeker:reached": "exit:waypoint",
    "seeker:lost": "failure",
Example #10
    print("Loading movie ratings...")
    data = source.loadMovieLensRating()
    print("Prepare movie information...")
    source.computeMovieInformation()
    print("Creating ranking for each movie ...")
    rankings = source.getPopularityRanksByRating()
    return (source, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(dataSource, data, rankings) = LoadData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(data, rankings)

contentKNN = KNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")

# Just make random recommendations
# Random = NormalPredictor()
# evaluator.AddAlgorithm(Random, "Random")

evaluator.Evaluate()

useTargetId = 85
totalMovieNeeded = 5
evaluator.GetRecomendationMovie(dataSource, useTargetId, totalMovieNeeded)
Example #11
def astra(args, logger):
    """
        Self-training with weak supervision.
        Leverages labeled and unlabeled data, plus weak rules, for training a neural network.
    """

    teacher_dev_res_list = []
    teacher_test_res_list = []
    teacher_train_res_list = []
    dev_res_list = []
    test_res_list = []
    train_res_list = []
    results = {}

    student_pred_list = []

    ev = Evaluator(args, logger=logger)

    logger.info("building student: {}".format(args.student_name))
    student = Student(args, logger=logger)

    logger.info("building teacher")
    teacher = Teacher(args, logger=logger)

    logger.info("loading data")
    dh = DataHandler(args, logger=logger, student_preprocess=student.preprocess, teacher_preprocess=teacher.preprocess)
    train_dataset = dh.load_dataset(method='train')
    train_dataset.oversample(args.oversample)  
    dev_dataset = dh.load_dataset(method='dev')
    test_dataset = dh.load_dataset(method='test')
    unlabeled_dataset = dh.load_dataset(method='unlabeled')

    logger.info("creating pseudo-dataset")
    pseudodataset = dh.create_pseudodataset(unlabeled_dataset)
    pseudodataset.downsample(args.sample_size)

    # Train Student
    newtraindataset = dh.create_pseudodataset(train_dataset)
    newtraindataset.balance('labels')
    newtraindataset.report_stats('labels')
    results['student_train'] = student.train(
        train_dataset=newtraindataset,
        dev_dataset=dev_dataset,
        train_label_name='labels',
        dev_label_name='labels',
    )
    train_res_list.append(results['student_train'])
    student.save('supervised_student')

    logger.info("\n\n\t*** Evaluating on dev data ***")
    results['supervised_student_dev'] = evaluate(student, dev_dataset, ev, "student dev")
    dev_res_list.append(results['supervised_student_dev'])

    logger.info("\n\n\t*** Evaluating on test data ***")
    results['supervised_student_test'], s_test_dict = evaluate_test(student, test_dataset, ev, "student test")
    test_res_list.append(results['supervised_student_test'])
    student_pred_list.append(s_test_dict)

    # Initialize Teacher
    logger.info("initializing teacher on unlabeled data with majority voting")
    teacher_res = teacher.train(pseudodataset)

    logger.info("evaluating majority voting")
    results['teacher_train'] = evaluate(teacher, train_dataset, ev, "teacher train")
    results['teacher_dev'] = evaluate(teacher, dev_dataset, ev, "teacher dev")
    results['teacher_test'] = evaluate(teacher, test_dataset, ev, "teacher test")
    teacher_train_res_list.append(results['teacher_train'])
    teacher_dev_res_list.append(results['teacher_dev'])
    teacher_test_res_list.append(results['teacher_test'])

    # Self-Training with Weak Supervision
    for iter in range(args.num_iter):
        logger.info("\n\n\t *** Starting loop {} ***".format(iter))

        # Create pseudo-labeled dataset
        pseudodataset.downsample(args.sample_size)

        # Add Student as extra rule in teacher.
        logger.info("Adding Student as extra rule in Teacher")
        teacher.student = student

        _ = teacher.train_ran(train_dataset=train_dataset, train_label_name='labels',
                              dev_dataset=dev_dataset, dev_label_name='labels',
                              unlabeled_dataset=pseudodataset)

        # Apply Teacher on unlabeled data
        teacher_pred_dict_unlabeled = teacher.predict_ran(dataset=pseudodataset)
        teacher_dev_res, t_dev_dict = evaluate_ran(teacher, dev_dataset, ev, "teacher dev iter{}".format(iter))
        teacher_dev_res_list.append(teacher_dev_res)

        teacher_test_res, t_test_dict = evaluate_ran(teacher, test_dataset, ev, "teacher test iter{}".format(iter))
        # analyze_rule_attention_scores(t_test_dict, logger, args.logdir, name='test_iter{}'.format(iter))
        teacher_test_res_list.append(teacher_test_res)

        # Update unlabeled data with Teacher's predictions
        pseudodataset.data['teacher_labels'] = teacher_pred_dict_unlabeled['preds']
        pseudodataset.data['teacher_proba'] = teacher_pred_dict_unlabeled['proba']
        pseudodataset.data['teacher_weights'] = np.max(teacher_pred_dict_unlabeled['proba'], axis=1)
        pseudodataset.drop(col='teacher_labels', value=-1)

        pseudodataset.balance('teacher_labels', proba='teacher_proba')
        pseudodataset.report_stats('teacher_labels')

        if len(set(teacher_pred_dict_unlabeled['preds'])) == 1:
            # Teacher predicts a single class
            logger.info("Self-training led to trivial predictions. Stopping...")
            break

        if len(pseudodataset) < 5:
            logger.info("[WARNING] Sampling led to only {} examples. Skipping iteration...".format(len(pseudodataset)))
            continue

        # Re-train student with weighted pseudo-instances
        logger.info('training student on pseudo-labeled instances provided by the teacher')
        train_res = student.train_pseudo(
            train_dataset=pseudodataset,
            dev_dataset=dev_dataset,
            train_label_name='teacher_proba' if args.soft_labels else 'teacher_labels',
            train_weight_name='teacher_weights' if args.loss_weights else None,
            dev_label_name='labels',
        )

        logger.info('fine-tuning the student on clean labeled data')
        train_res = student.finetune(
            train_dataset=newtraindataset,
            dev_dataset=dev_dataset,
            train_label_name='labels',
            dev_label_name='labels',
        )
        train_res_list.append(train_res)

        # Evaluate student performance and update records
        dev_res = evaluate(student, dev_dataset, ev, "student dev iter{}".format(iter))
        test_res, s_test_dict = evaluate_test(student, test_dataset, ev, "student test iter{}".format(iter))
        logger.info("Student Dev performance on iter {}: {}".format(iter, dev_res['perf']))
        logger.info("Student Test performance on iter {}: {}".format(iter, test_res['perf']))

        prev_max = max([x['perf'] for x in dev_res_list])
        if dev_res['perf'] > prev_max:
            logger.info("Improved dev performance from {:.2f} to {:.2f}".format(prev_max, dev_res['perf']))
            student.save("student_best")
            teacher.save("teacher_best")
        dev_res_list.append(dev_res)
        test_res_list.append(test_res)
        student_pred_list.append(s_test_dict)

    # Store Final Results
    logger.info("Final Results")
    teacher_all_dev = [x['perf'] for x in teacher_dev_res_list]
    teacher_all_test = [x['perf'] for x in teacher_test_res_list]
    teacher_perf_str = ["{}:\t{:.2f}\t{:.2f}".format(i, teacher_all_dev[i], teacher_all_test[i]) for i in np.arange(len(teacher_all_dev))]
    logger.info("TEACHER PERFORMANCES:\n{}".format("\n".join(teacher_perf_str)))

    all_dev = [x['perf'] for x in dev_res_list]
    all_test = [x['perf'] for x in test_res_list]
    perf_str = ["{}:\t{:.2f}\t{:.2f}".format(i, all_dev[i], all_test[i]) for i in np.arange(len(all_dev))]
    logger.info("STUDENT PERFORMANCES:\n{}".format("\n".join(perf_str)))

    # Get results in the best epoch (if multiple best epochs keep last one)
    best_dev_epoch = len(all_dev) - np.argmax(all_dev[::-1]) - 1
    best_test_epoch = len(all_test) - np.argmax(all_test[::-1]) - 1
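    # For example, with all_dev = [0.61, 0.67, 0.64, 0.67], np.argmax(all_dev[::-1])
    # is 0, so best_dev_epoch = 4 - 0 - 1 = 3: the last epoch that reached the
    # maximum dev score (np.argmax on the unreversed list would return the first, 1).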
    logger.info("BEST DEV {} = {:.3f} for epoch {}".format(args.metric, all_dev[best_dev_epoch], best_dev_epoch))
    logger.info("FINAL TEST {} = {:.3f} for epoch {} (max={:.2f} for epoch {})".format(args.metric,
                                                                                       all_test[best_dev_epoch], best_dev_epoch, all_test[best_test_epoch], best_test_epoch))
    results['teacher_train_iter'] = teacher_train_res_list
    results['teacher_dev_iter'] = teacher_dev_res_list
    results['teacher_test_iter'] = teacher_test_res_list

    results['student_train_iter'] = train_res_list
    results['student_dev_iter'] = dev_res_list
    results['student_test_iter'] = test_res_list

    results['student_dev'] = dev_res_list[best_dev_epoch]
    results['student_test'] = test_res_list[best_dev_epoch]
    results['teacher_dev'] = teacher_dev_res_list[best_dev_epoch]
    results['teacher_test'] = teacher_test_res_list[best_dev_epoch]
    
    # Save models and results
    student.save("student_last")
    teacher.save("teacher_last")
    save_and_report_results(args, results, logger)
    return results
            tips.extend(res.get('items'))
        return [
            Photo(self.make_link(tip['prefix'], tip['suffix']),
                  tip.get('createdAt')) for tip in tips
        ]

    def make_link(self, prefix, suffix):
        """
        Input:
        'prefix': 'https://igx.4sqi.net/img/user/',
        'suffix': '/13893908-H3NB1YDQ4ZKX3CGI.jpg',
        to link:
        'https://igx.4sqi.net/img/user/13893908-H3NB1YDQ4ZKX3CGI.jpg'
        :param param:
        :return: link to a photo
        """
        return prefix + '500x500' + suffix


fp = FProvider()
places = fp.get_venues_near(Point(59.9538695, 30.2659853), 20000)
e = Evaluator()
marks = [e.evaluate_place(p) for p in places[:5]]
for m, p in zip(marks, places):
    print(p)
    print(m)
    print()

# ia = ImageAnalytics()
# i = ia._load_photo("https://igx.4sqi.net/img/general/500x500/13893908_t7OjS4DdVPAV0gMJL5N6g_qM2UEUZUFndo5uHDtdVD0.jpg")
# print(i)
Beispiel #13
0
    #  'Anger': "Family Musical Comedy",
    #  'Depressing': "Drama Biography",
    #  "Confusing": 'Thriller Fantasy Crime',
    #  "Inspring": "Biography Documentary Sport War",
    # "Thrilling": "Horror Mystery"
}

for mood, c in moods.items():
    print(mood, c)
    # Load up common data set for the recommender algorithms
    (ml, evaluationData, rankings) = LoadMovieLensData(c)
    print("Searching for best parameters...")
    param_grid = {
        'n_epochs': [20, 30],
        'lr_all': [0.005, 0.010],
        'n_factors': [50, 100]
    }
    gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
    gs.fit(evaluationData)
    evaluator = Evaluator(evaluationData, rankings,
                          list(ml.movieID_to_name.keys()))
    params = gs.best_params['rmse']
    SVDtuned = SVD(n_epochs=params['n_epochs'],
                   lr_all=params['lr_all'],
                   n_factors=params['n_factors'])
    evaluator.AddAlgorithm(SVDtuned, "SVD - Tuned")

    print("--------------------------------\n")
    filename = mood + ".sav"
    evaluator.SampleTopNRecs(ml, filename)
    print("--------------------------------\n\n")
Beispiel #14
0
def run(episodes, discount, useStreak):
    start_time = time.time()
    print('run with: discount=' + str(discount) + ', useStreak=' + str(useStreak))
    batch_size = 32
    hidden_dim = 300
    experienceSize = 20000
    epsilon_min_after = 1500
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    writer = SummaryWriter('runs/run_discount_' + str(discount) + '_streak_' +
                           str(useStreak))
    trainer = Trainer(hidden_dim, experienceSize, discount, batch_size, device,
                      writer)
    for e in range(episodes):
        observation = trainer.reset()
        done = False
        batchReward = 0
        steps = 0
        threes = 0
        loss = None  # in case the episode ends before the first training step
        while not done:
            action = trainer.policyAction(observation['board'], e,
                                          epsilon_min_after)
            old_obs = observation
            observation, reward, done, _ = trainer.step(int(action))
            reshaped = trainer.reshape(torch.tensor(observation['board']))
            threes += trainer.streakReward(trainer.player, reshaped,
                                           int(action))
            if useStreak:
                reward = trainer.change_reward_streak(reward, done, reshaped,
                                                      int(action), useStreak)
            else:
                reward = trainer.change_reward(reward, done)
            next_state = observation['board']
            exp = Experience(old_obs['board'], action, reward, next_state,
                             int(done))
            trainer.addExperience(exp)
            batchReward += reward
            loss = trainer.train()
            steps += 1
        threes /= 3
        if loss is not None:
            writer.add_scalar('trainLoss', loss, e)
        writer.add_scalar('batchReward', batchReward, e)
        writer.add_scalar('steps', steps, e)
        writer.add_scalar('threes', threes, e)
        if (e % 1000 == 0) and (e > 0):
            trainer.switchPosition()
        if e % 50 == 0:
            trainer.synchronize()
            firstStep = str(
                trainer.policy(
                    trainer.reshape(torch.tensor(
                        trainer.reset()['board'])))[0])
            writer.add_text('first_qs', firstStep, e)
        if e % 50 == 0:
            trainer.save("model_state_discount_" + str(discount) +
                         '_useStreak_' + str(useStreak))
            # print(e)
            # print("episode: " + str(e) + " meanReward generateEpisodes: " + str(batchReward) + " meanLoss: " + str(loss))
            # print("steps: " + str(steps))
            # firstStep = str(trainer.policy(trainer.reshape(torch.tensor(trainer.reset()['board'])))[0])
            # with torch.no_grad():
            #     print(firstStep)
            evaluator = Evaluator(100, trainer)
            evaluator.winPercentage(e)

    # if e % 25000 == 0:
    #     trainer.switch()
    #     trainer.save("model_state_"+str(e))
    print("--- %s seconds ---" % (time.time() - start_time))
Beispiel #15
0
    getWindowSizePosAndNegSequence(fastaFileName, Train_or_Test, windowType,
                                   windowSize, fastaDir)
    positiveFile = "seq/" + Train_or_Test + windowType + "PosSequence.seq"
    negativeFile = "seq/" + Train_or_Test + windowType + "NegSequence.seq"
    test_aaindex_x, test_aaindex_y = InitAAindex().getTestAAIndex(
        positiveFile, negativeFile, selectedPercent=1, random_state=0)

    test_ook_x.shape = (test_ook_x.shape[0], test_ook_x.shape[2],
                        test_ook_x.shape[3])
    test_aaindex_x.shape = (test_aaindex_x.shape[0], test_aaindex_x.shape[2],
                            test_aaindex_x.shape[3])
    predict_probability = model.predict(x=[test_ook_x, test_aaindex_x],
                                        batch_size=1000,
                                        verbose=1)
    y_pred = predict_probability.argmax(axis=-1)
    result = Evaluator().calculate_performance(test_ook_y[:, 1], y_pred,
                                               predict_probability[:, 1])
    model_id = 1
    for rs in np.random.randint(10, 10000, 10):
        fastaFileName = "sspka_general_train.fasta"
        Train_or_Test = "Train"
        windowType = "OOK"
        windowSize = 26
        getWindowSizePosAndNegSequence(fastaFileName, Train_or_Test,
                                       windowType, windowSize, fastaDir)
        positiveFile = "seq/" + Train_or_Test + windowType + "PosSequence.seq"
        negativeFile = "seq/" + Train_or_Test + windowType + "NegSequence.seq"
        train_ook_X, train_ook_Y = PreOneOfKey().getTrainOneofkeyNLabel(
            positiveFile, negativeFile, selectedPercent=0.8, random_state=rs)
        windowType = "AAIndex"
        windowSize = 14
        getWindowSizePosAndNegSequence(fastaFileName, Train_or_Test,
Beispiel #16
0
logger.info('Saving model architecture')
with open(out_dir + '/model_arch.json', 'w') as arch:
    arch.write(model.to_json(indent=2))

logger.info(
    '---------------------------------------------------------------------------------------'
)

###############################################################################################################################
## Training
#

logger.info('Initial Evaluation:')
evl = Evaluator(logger,
                out_dir, (train_qn_x, train_ans_x, train_y),
                (dev_qn_x, dev_ans_x, dev_y), (test_qn_x, test_ans_x, test_y),
                model_type,
                batch_size_eval=batch_size_eval,
                print_info=True)
evl.evaluate(model, -1)

evl.print_info()

total_train_time = 0
total_eval_time = 0

for ii in range(nb_epoch):
    # Training
    train_input = [train_qn_x, train_ans_x]
    if model_type == 'cnnwang2016':
        train_input = [train_qn_x, train_qn_x, train_ans_x, train_ans_x]
Beispiel #17
0
# Initialize Dataset class
dataset = Dataset(data_dir=config.data_dir,
                  data_name=model_config.data_name,
                  train_ratio=model_config.train_ratio,
                  device=device)

log_dir = os.path.join('saves', config.model)
# Initialize Logger class
logger = Logger(log_dir)
model_config.save(os.path.join(logger.log_dir, 'config.json'))
# Get the position and target of the evaluated item
eval_pos, eval_target = dataset.eval_data()
# Get the popularity item
item_popularity = dataset.item_popularity
# Initialize Evaluator class
evaluator = Evaluator(eval_pos, eval_target, item_popularity,
                      model_config.top_k)
# Build the model
model = build_model(config.model, model_config, dataset.num_users,
                    dataset.num_items, device)
# Get the model info and data info
logger.info(model_config)
logger.info(dataset)

# Initialize Trainer class
trainer = Trainer(dataset=dataset,
                  model=model,
                  evaluator=evaluator,
                  logger=logger,
                  conf=model_config)
# Train the model and get results
trainer.train(experiment)
Beispiel #18
0
 def __init__(self, knowledge_file_path):
     self.knowledge_base = []
     self.values_table = {}
     self.evaluator = Evaluator(self.values_table)
     self.load_knowledge(knowledge_file_path)
Beispiel #19
0
        StructField("item", StringType(), True),
        StructField("rating", DoubleType(), True),
        StructField("timestamp", LongType(), True)
    ])
    ratingRdd = sc.textFile(inputPath).map(lambda line:line.split("::"))\
                    .map(lambda x: (x[0], x[1], float(x[2]), int(x[3])))
    ratingDf = spark.createDataFrame(data=ratingRdd, schema=schema)
    #     ratingDf,_ = ratingDf.randomSplit([1.0,9.0],seed=40)
    ratingDf = ratingDf.repartition(300)
    ratingDf.printSchema()
    ratingDf.show(5)
    n = ratingDf.count()
    #     ratingDf = ratingDf.withColumn('score',col('rating')*0+1).select('user','item','score')
    print('total lines: %s' % n)
    train, test = ratingDf.randomSplit([4.0, 1.0], seed=40)
    train.cache()
    test.cache()
    itemCF = ItemCFModel(df=train, spark=spark)
    itemCF.train()
    recTopN = recommend(train, itemCF.item_pair_sim, Normalization=True)
    pre = spark.createDataFrame(data=recTopN.flatMapValues(lambda x: x).map(
        lambda x: (x[0], x[1][0], x[1][1])),
                                schema=['user', 'item', 'rating'])
    evaluator = Evaluator(train, test, pre)
    (precise, recall, coverage, popularity) = evaluate(evaluator)
    print(
        'precise:%2.2f%%,  recall:%2.2f%%,  coverage:%2.2f%%,  popularity:%2.2f'
        % (precise * 100, recall * 100, coverage * 100, popularity))
    end = time.time()
    print('spend %s s' % (end - start))
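The precision, recall, coverage and popularity figures printed above come from this snippet's own Evaluator and evaluate() helpers. As a rough illustration of the first two, a plain-Python sketch of micro-averaged precision/recall over top-N recommendation lists (hypothetical helper, not the Spark-based Evaluator):

def precision_recall(rec_items_by_user, true_items_by_user):
    # rec_items_by_user: {user: [recommended items]}; true_items_by_user: {user: {held-out items}}
    hits = rec_total = true_total = 0
    for user, rec_items in rec_items_by_user.items():
        truth = true_items_by_user.get(user, set())
        hits += len(set(rec_items) & truth)
        rec_total += len(rec_items)
        true_total += len(truth)
    precision = hits / rec_total if rec_total else 0.0
    recall = hits / true_total if true_total else 0.0
    return precision, recall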
Beispiel #20
0
#
# Min Lee
# [email protected]
# MacOS
# Python
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#

import sys
from Classifier import backprop
from Evaluator import Evaluator

classifier = backprop(sys.argv)
evaluator = Evaluator(sys.argv)
performance = evaluator.evaluate(classifier, sys.argv)

print(performance)
Beispiel #21
0
 def evaluation(self):
     self.evaluator = Evaluator(self.test_tuple, self.uid_recommend, self.num_user, self.num_item, self.topK)
     self.evaluator.prec_recall()
Beispiel #22
0
    ]
    pos_human_tokens_list = [
        word_tokenize(sentence) for sentence in pos_human_list
    ]

    neg_input_tokens_list = [
        word_tokenize(sentence) for sentence in neg_input_list
    ]
    neg_labels_tokens_list = [
        word_tokenize(sentence) for sentence in neg_labels_list
    ]
    neg_human_tokens_list = [
        word_tokenize(sentence) for sentence in neg_human_list
    ]

    sen_analyzer = Evaluator(dataset=dataset)

    ######### Self BLEU ###########
    print(
        "---- BLEU score of the transferred texts against the original ones ----"
    )

    pos_to_neg_self = sen_analyzer.self_bleu(pos_labels_tokens_list,
                                             pos_input_tokens_list)
    print("Positive to negative: ", pos_to_neg_self)

    neg_to_pos_self = sen_analyzer.self_bleu(neg_labels_tokens_list,
                                             neg_input_tokens_list)
    print("Negative to positive: ", neg_to_pos_self)

    ######### Sentiment ############
Beispiel #23
0
'''
@author: Joshua
'''
from Parser import Parser
from SymbolTable import TableBuilder
from Evaluator import Evaluator
# from PostFixConverter import inToPost
# from LexicalAnalyzer import Lexer

# ''' Creating Parse Tree '''
Parse = Parser('TestProgram.txt')
root = Parse.parse()
# root.printTree()
''' Creating Symbol Table '''
tableBuilder = TableBuilder(root)
tableBuilder.addSymbols()
#tableBuilder.printTable()
''' Running Evaluation '''
symbolTable = tableBuilder.returnTable()
eval = Evaluator(root, symbolTable)
eval.evaluate()
Beispiel #24
0
from ACO.ACO_Intensificator import Intensificator
from ACO.ACO_Main import ACO

aco_initializer = ACO_Initializer()
aco_solutiongenerator = SolutionGenerator()
aco_evaporator = Evaporator()
aco_intensificator = Intensificator()

initializer = Heuristic_Initializer()
#initializer = Random_Initializer()
selector = Roulette_Selector()
recombiner = Recombiner()
mutator = Route_Mutator()
replacer = Replacer()
task = Task()
evaluator = Evaluator()

aco_iterations = 5
aco_sorter = ACO_Sorter(aco_initializer, aco_solutiongenerator, aco_evaporator,
                        aco_intensificator, task.distance_matrix,
                        aco_iterations)
pop_size = 10
offspring_size = 5
mutate_prop = 0.2
iterations = 20

ga = Genetic_Alrorithm(initializer=initializer,
                       selector=selector,
                       recombiner=recombiner,
                       mutator=mutator,
                       replacer=replacer,
    for s in splitted:
        array.append(int(s))
    return array


if __name__ == "__main__":

    llvmutils = LlvmUtils(llvmpath='/usr/bin/',
                          clangexe='clang-10',
                          optexe='opt-10',
                          llcexe='llc-10')
    llvmfiles = LlvmFiles(
        basepath='./',
        source_bc='polybench_small/polybench_small_original.bc',
        jobid='solution')
    evaluator = Evaluator(runs=10)

    plot_labels = [
        'total codelines', 'codelines', 'labels', 'conditional jumps',
        'unconditional jumps', 'function labels', 'function calls'
    ]

    llvmutils.toAssembly(llvmfiles.get_original_bc(), "original.ll")
    llvmutils.toExecutable(llvmfiles.get_original_bc(), "original.o")
    evaluator.evaluate("original.ll", "./original.o")

    original_results = []
    #original_results.append(evaluator.get_runtime())
    original_results.append(evaluator.get_total_codelines())
    original_results.append(evaluator.get_codelines())
    original_results.append(evaluator.get_tags())
Beispiel #26
0
    def tapToAll(self, game_state: GameState):
        self.eval = Evaluator()

        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]
        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        probability_move = []
        utility_value = [0, 0, 0, 0, 0]

        if (game_state.player == 1):  #Ai turn
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5,
                          ai_left, ai_right))
            if ((ai_left + ai_right) % 2 == 0):
                probability_move.append(
                    GameState(0, player_left, player_right,
                              int((ai_left + ai_right) / 2),
                              int((ai_left + ai_right) / 2)))
        else:  #Player turn
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_left + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_right + ai_right) % 5))
            if ((player_left + player_right) % 2 == 0):
                probability_move.append(
                    GameState(1, int((player_left + player_right) / 2),
                              int((player_left + player_right) / 2), ai_left,
                              ai_right))

        for i in range(len(probability_move)):
            print("i:", i, end=' \t')
            probability_move[i].print()
            if ai_left == 0:
                utility_value[0] = utility_value[1] = None
            if ai_right == 0:
                utility_value[2] = utility_value[3] = None
            if player_left == 0:
                utility_value[0] = utility_value[2] = None
            if player_right == 0:
                utility_value[1] = utility_value[3] = None

            if ((ai_left == ai_right) or (((ai_left + ai_right) % 2) != 0)
                ) and probability_move[i].player == 0:
                utility_value[4] = None
            if ((player_left == player_right) or
                ((player_left + player_right) % 2) != 0
                ) and probability_move[i].player == 1:
                utility_value[4] = None

            if utility_value[i] is not None:
                utility_value[i] = self.eval.evaluate(probability_move[i], 1)

        print("UTILITY ", utility_value)

        return utility_value, probability_move
    def run(self):

        backend.clear_session()
        tf.reset_default_graph()
        cv2.destroyAllWindows()

        self.stop = False
        print(device_lib.list_local_devices())

        # make frames from content video
        frames, self.fps = get_frames(self.content_path, self.height,
                                      self.width)
        print('Content video fps:', self.fps)

        frames = np.asarray(frames)
        frames_count = 0
        temp_frames = []
        for frame in frames:
            temp_frames.append(
                preprocess(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), True,
                           self.height, self.width))
            frames_count += 1
        print('video frames: ', frames_count)

        # have to create a placeholder without knowing one dimension of the frames, so the inputs can be concatenated
        content_image = tf.placeholder(tf.float32,
                                       shape=(None, self.height, self.width,
                                              3))

        # load style image
        style_path = self.style_path
        style = preprocess(load_img(style_path, self.height, self.width), True,
                           self.height, self.width)
        style_image = tf.Variable(style)

        # make placeholder for our target new frames
        combination_image = tf.placeholder(tf.float32,
                                           shape=(None, self.height,
                                                  self.width, 3))

        # make placeholder for the previously generated frame, used for the video (temporal) loss
        previous_combination = tf.placeholder(tf.float32,
                                              shape=(1, self.height,
                                                     self.width, 3))

        # inputs for CNN
        input_tensor = tf.concat(
            [content_image, style_image, combination_image], axis=0)

        # define CNN model
        model = VGG16(input_tensor=input_tensor,
                      weights='imagenet',
                      pooling='max',
                      include_top=False)

        layers = dict([(layer.name, layer.output) for layer in model.layers])

        loss = backend.variable(0.)

        # add content loss
        layer_features = layers[self.contet_layers[0]]
        content_image_features = layer_features[0, :, :, :]
        combination_features = layer_features[2, :, :, :]
        loss += self.content_weight * content_loss(content_image_features,
                                                   combination_features)

        # add style loss
        for layer_name in self.style_layers:
            layer_features = layers[layer_name]
            style_features = layer_features[1, :, :, :]
            combination_features = layer_features[2, :, :, :]
            loss += (self.style_weight / len(self.style_layers)) * style_loss(
                style_features, combination_features, self.height, self.width)

        # add neighbour loss
        loss += self.neighbour_weight * neighbour_loss(previous_combination,
                                                       combination_image)

        # add total variation regularizer
        loss += total_variation_loss(combination_image, self.height,
                                     self.width)

        # compute gradients
        grads = backend.gradients(loss, combination_image)

        # create variable outputs to store loss and gradients
        outputs = [loss]
        outputs += grads

        out_frames = []
        x = np.random.uniform(0, 255, (1, self.height, self.width, 3)) - 128.
        prev = x.copy()
        z = x.copy()

        evaluator = Evaluator(self.height, self.width)

        for i in range(frames_count):

            # compute tensorflow graph to get loss value and gradients
            f_outputs = backend.function(
                [combination_image, previous_combination],
                outputs,
                feed_dict={content_image: temp_frames[i]})

            evaluator.set_data(f_outputs)
            iterations = self.iterations

            print('Current frame: ', i)
            self.framelb.setText(str(i + 1))

            for j in range(iterations):

                if not self.stop:
                    print('Start of iteration', j)
                    self.iterlb.setText(str(j + 1))
                    start_time = time.time()

                    x, min_val, info = optimize.fmin_l_bfgs_b(
                        evaluator.loss,
                        x.flatten(),
                        args=(prev.flatten(), ),
                        fprime=evaluator.grads,
                        maxfun=20)

                    print('Current loss value:', min_val)
                    end_time = time.time()
                    print('Iteration %d completed in %ds' %
                          (j, end_time - start_time))

                    z = x.copy()
                    z = deprocess(z, self.height, self.width)
                    im = Image.fromarray(z)

                    # show actual generated image on gui
                    im = ImageQt(im)
                    pix = QPixmap.fromImage(im)
                    self.outlb.setPixmap(pix)

                else:
                    break

            out_frames.append(z)

            # have to initialize the optimization with the warped frame
            if (i + 1 < frames_count):
                f_prev = cv2.cvtColor(frames[i], cv2.COLOR_BGR2GRAY)
                f_next = cv2.cvtColor(frames[i + 1], cv2.COLOR_BGR2GRAY)
                flow = optical_flow(f_prev, f_next)
                # the next initialization will be x

                x = preprocess(warp(z, flow), True, self.height, self.width)
            else:
                x = preprocess(z, True, self.height, self.width)

            prev = x.copy()

        video_name = '../generated/out.avi'

        fourcc = cv2.VideoWriter_fourcc(*'MPEG')
        video = cv2.VideoWriter(video_name, fourcc, int(self.fps),
                                (self.width, self.height))  # frameSize is (width, height)

        for ima in out_frames:
            video.write(cv2.cvtColor(ima, cv2.COLOR_RGB2BGR))

        del evaluator
        cv2.destroyAllWindows()
        video.release()
        backend.clear_session()
        tf.reset_default_graph()
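scipy's fmin_l_bfgs_b queries the loss and the gradient through separate callbacks, which is why the Evaluator above exposes loss() and grads(); a common way to implement such a wrapper is to compute both in a single pass and cache the gradient. A minimal sketch under that assumption (hypothetical CachedLossGrads class, not the Evaluator used above; f_outputs is assumed to map a batch of one image to [loss, gradient]):

import numpy as np


class CachedLossGrads:
    def __init__(self, f_outputs, height, width):
        self.f_outputs = f_outputs
        self.height, self.width = height, width
        self._grads = None

    def loss(self, x):
        # evaluate loss and gradient together; L-BFGS asks for them separately
        x = x.reshape((1, self.height, self.width, 3))
        outs = self.f_outputs([x])
        self._grads = np.asarray(outs[1]).flatten().astype('float64')
        return float(outs[0])

    def grads(self, x):
        # return the gradient cached by the preceding loss() call
        return np.copy(self._grads)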
Beispiel #28
0
def train_model(data_path, log_path, seed, use_gpu = False):
    """Train model using negative sampling.
    Args:
    - model
    - optim: optimizer
    - train_q_embed: Embedding object for training queries, shape (nb
      train queries, dim)
    - dev_q_embed: Embedding object for dev queries, shape (nb dev
      queries, dim)
    - dev_q_cand_ids: list containing candidate ID of each dev query
      (None if it is not a candidate), used to compute MAP on dev set.
    - train_pairs: array of (query ID, hypernym ID) pairs
      for training
    - dev_pairs: array of (query ID, hypernym ID) pairs for
      validation
    - hparams: dict containing settings of hyperparameters
    - log_path: path of log file
    - seed: seed for RNG
    """
    cand_embed, train_q_embed, dev_q_embed, dev_q_cand_ids, train_pairs, dev_pairs = load_data(data_path)
    model, optim = init_model(cand_embed, train_q_embed, use_gpu)
    
    # Extract hyperparameter settings
    nb_neg_samples = hparams["nb_neg_samples"]
    subsample = hparams["subsample"]
    max_epochs = hparams["max_epochs"]
    patience = hparams["patience"]
    batch_size = hparams["batch_size"]
    clip = hparams["clip"]

    if seed:
        random.seed(seed)
        np.random.seed(seed)

    # Prepare sampling of negative examples
    candidate_ids = list(range(model.get_nb_candidates()))
    cand_sampler = make_sampler(candidate_ids)

    # Prepare subsampling of positive examples
    pos_sample_prob = {}
    if subsample:
        hyp_fd = {}
        for h_id in train_pairs[:,1]:
            if h_id not in hyp_fd:
                hyp_fd[h_id] = 0
            hyp_fd[h_id] += 1
        min_freq = min(hyp_fd.values())
        for (h_id, freq) in hyp_fd.items():
            pos_sample_prob[h_id] = sqrt(min_freq / freq)
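        # Worked example with illustrative numbers: if the rarest hypernym occurs
        # 2 times and another occurs 50 times, the frequent one is kept with
        # probability sqrt(2/50) = 0.2 while the rarest is always kept
        # (sqrt(2/2) = 1.0), flattening the hypernym label distribution.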

    # Check if we're using CUDA
    if model.use_cuda:
        device=torch.device("cuda")
    else:
        device=torch.device("cpu")

    # Initialize training batch for query IDs, positive hypernym IDs,
    # negative hypernym IDs, positive targets, and negative targets.
    # We separate positive and negative examples to compute
    # the losses separately. Note that this is a bit inefficient, as
    # we compute the query projections twice.
    batch_q = np.zeros(batch_size, 'int64')
    batch_h_pos = np.zeros((batch_size,1), 'int64')
    batch_h_neg = np.zeros((batch_size,nb_neg_samples), 'int64')
    t_pos_var = torch.ones((batch_size,1), requires_grad=False, device=device)
    t_neg_var = torch.zeros((batch_size,nb_neg_samples), requires_grad=False, device=device)

    # Prepare list of sets of gold hypernym IDs for queries in
    # training set. This is used for negative sampling.
    nb_train_queries = train_q_embed.weight.shape[0]
    train_gold_ids = [set() for _ in range(nb_train_queries)]
    nb_train_pairs = train_pairs.shape[0]
    for i in range(nb_train_pairs):
        q_id = int(train_pairs[i,0])
        h_id = int(train_pairs[i,1])
        train_gold_ids[q_id].add(h_id)

    # Prepare list of sets of gold hypernym IDs for queries in dev set
    # to compute score (MAP)
    nb_dev_queries = dev_q_embed.weight.shape[0]
    dev_gold_ids = [set() for _ in range(nb_dev_queries)]
    nb_dev_pairs = dev_pairs.shape[0]
    for i in range(nb_dev_pairs):
        q_id = int(dev_pairs[i,0])
        h_id = int(dev_pairs[i,1])
        dev_gold_ids[q_id].add(h_id)


    # Prepare input variables to compute loss on dev set
    dev_q_ids = torch.tensor(dev_pairs[:,0], dtype=torch.int64, device=device)
    dev_q_var = dev_q_embed(dev_q_ids)
    dev_h_var = torch.tensor(dev_pairs[:,1], dtype=torch.int64, requires_grad=False, device=device).unsqueeze(1)
    dev_t_var = torch.ones((nb_dev_pairs,1), dtype=torch.float32, requires_grad=False, device=device)

    # Make Evaluator to compute MAP on dev set
    dev_eval = Evaluator(model, dev_q_embed, dev_q_cand_ids)

    print("\nEvaluating untrained model on dev set...")
    MAP = dev_eval.get_MAP(dev_gold_ids)
    AP = dev_eval.get_AP(dev_gold_ids)
#     pAk = dev_eval.get_p_at_k(1, dev_gold_ids)
    MRR = dev_eval.get_MRR(dev_gold_ids)
    print("MAP: {:.4f}".format(MAP))
    print("AP: {:.4f}".format(AP))
#     print("P@1: {:.4f}".format(pAk))
    print("MRR: {:.4f}".format(MRR))

    checkpoint_header = ["Epoch", "Updates", "PosLoss", "NegLoss", 
                         "DevLoss", "DevMAP", "DevAP", "DevMRR", "TimeElapsed"]
    with open(log_path, "w") as f:
        f.write("\t".join(checkpoint_header) + "\n")

    # Train model
    best_model = deepcopy(model)
    best_score = float("-inf")
    nb_no_gain = 0
    batch_row_id = 0
    done = False
    start_time = time.time()
    print("\nStarting training...\n")
    print("\t".join(checkpoint_header))
    for epoch in range(1,max_epochs+1):
        model.train()
        np.random.shuffle(train_pairs)
        total_pos_loss = 0.0
        total_neg_loss = 0.0

        # Loop through training pairs
        nb_updates = 0
        for pair_ix in range(train_pairs.shape[0]):
            q_id = train_pairs[pair_ix,0]
            h_id = train_pairs[pair_ix,1]
            if subsample and random.random() >= pos_sample_prob[h_id]:
                continue
            batch_q[batch_row_id] = q_id
            batch_h_pos[batch_row_id] = h_id

            # Get negative examples
            neg_samples = []
            while len(neg_samples) < nb_neg_samples:
                cand_id = next(cand_sampler)
                if cand_id not in train_gold_ids[q_id]:
                    neg_samples.append(cand_id)
            batch_h_neg[batch_row_id] = neg_samples

            # Update on batch
            batch_row_id = (batch_row_id + 1) % batch_size
            if batch_row_id + 1 == batch_size:
                q_ids = torch.tensor(batch_q, dtype=torch.int64, requires_grad=False, device=device) 
                q_var = train_q_embed(q_ids)
                h_pos_var = torch.tensor(batch_h_pos, dtype=torch.int64, requires_grad=False, device=device)
                h_neg_var = torch.tensor(batch_h_neg, dtype=torch.int64, requires_grad=False, device=device)
                optim.zero_grad()
                pos_loss = model.get_loss(q_var, h_pos_var, t_pos_var)
                neg_loss = model.get_loss(q_var, h_neg_var, t_neg_var)
                loss = pos_loss + neg_loss
                loss.backward()
                if clip > 0:
                    torch.nn.utils.clip_grad_norm_(train_q_embed.parameters(), clip)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
                optim.step()
                total_pos_loss += pos_loss.item()
                total_neg_loss += neg_loss.item()
                nb_updates += 1

        # Check progress
        avg_pos_loss = total_pos_loss / (nb_updates * batch_size)
        avg_neg_loss = total_neg_loss / (nb_updates * batch_size)

        # Compute loss and MAP on dev set
        model.eval()
        dev_loss = model.get_loss(dev_q_var, dev_h_var, dev_t_var)
        avg_dev_loss = dev_loss.item() / nb_dev_pairs
        MAP = dev_eval.get_MAP(dev_gold_ids)
        AP = dev_eval.get_AP(dev_gold_ids)
#         pAk = dev_eval.get_p_at_k(1, dev_gold_ids)
        MRR = dev_eval.get_MRR(dev_gold_ids)
        checkpoint_data = []
        checkpoint_data.append(str(epoch))
        checkpoint_data.append(str(nb_updates))
        checkpoint_data.append("{:.4f}".format(avg_pos_loss))
        checkpoint_data.append("{:.4f}".format(avg_neg_loss))
        checkpoint_data.append("{:.4f}".format(avg_dev_loss))
        checkpoint_data.append("{:.4f}".format(MAP))
        checkpoint_data.append("{:.4f}".format(AP))
#         checkpoint_data.append("{:.4f}".format(pAk))
        checkpoint_data.append("{:.4f}".format(MRR))
        checkpoint_data.append("{:.1f}s".format(time.time()-start_time))
        print("\t".join(checkpoint_data))
        with open(log_path, "a") as f:
            f.write("\t".join(checkpoint_data)+"\n")

        # Early stopping
        if MAP > best_score:
            best_score = MAP
            best_model = deepcopy(model)
            nb_no_gain = 0
        else:
            nb_no_gain += 1
        if nb_no_gain >= patience:
            print("EARLY STOP!")
            done = True            
            print("\nEvaluating best model on dev set...")
            dev_eval.set_model(best_model)
            MAP = dev_eval.get_MAP(dev_gold_ids)
            print("MAP of best model: {:.3f}".format(MAP))
        if done:
            break
    print("\nTraining finished after {} epochs".format(epoch))
    return best_model
Beispiel #29
0
    data = ml.loadPCLatestSmall()
    print(
        "\nComputing product popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadPCData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

# User-based KNN
UserKNN = KNNBasic(sim_options={'name': 'pearson', 'user_based': True})
evaluator.AddAlgorithm(UserKNN, "User KNN")

# Item-based KNN
ItemKNN = KNNBasic(sim_options={'name': 'pearson', 'user_based': False})
evaluator.AddAlgorithm(ItemKNN, "Item KNN")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(False)
random.seed(0)


def LoadData():
    movielens = MovieLensData()
    print(
        "Loading all ratings and computing popularity ranks from Movie Lens..."
    )
    data = movielens.loadMovieLens()
    ranks = movielens.computePopularity()
    return (movielens, data, ranks)


(movielens, data_evaluation, ranks) = LoadData()

evaluator = Evaluator(data_evaluation, ranks)

ContentBased = ContentFiltering()

itemKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': False}, k=15)

Hybrid = HybridAlgorithm([ContentBased, itemKNN], [0.75, 0.25])

# print("Computing content-based similarity based on Genre, Year and Mise En Scene similarity")
evaluator.AddAlgorithm(ContentBased, "Content Based Filtering")

evaluator.AddAlgorithm(itemKNN, "Item-Based Collaborative Filtering")

evaluator.AddAlgorithm(Hybrid, "Hybrid")

evaluator.Evaluate(True)