# Imports needed by the snippets below (standard library, NumPy,
# scikit-learn). utilities, upenn_features, linear_kernel,
# linear_kernel_test, gram_grid_search, and the Adj/AdjList message types
# are project-local and assumed importable from this package.
import os
import time
import cPickle

import numpy as np
from sklearn import preprocessing

import utilities


def begin_train(dynamic_path, static_path, out_path):
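    """Train one MKL classifier per adjective.

    Static and dynamic features are each turned into a linear kernel, the
    kernels are blended as alpha*static + (1 - alpha)*dynamic over a grid of
    alpha values, and the best-scoring classifier, its scalers, and kernel
    statistics are pickled under out_path.
    """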
    dynamic_features = utilities.load_adjective_phase(dynamic_path)
    static_features = utilities.load_adjective_phase(static_path)
    adjective_list = utilities.adjectives
    for adjective in adjective_list:
        # File name 
        dataset_file_name = "_".join(("trained", adjective)) + ".pkl"
        newpath = os.path.join(out_path, "trained_adjectives")
        if not os.path.isdir(newpath):  # make sure the output folder exists
            os.makedirs(newpath)
        path_name = os.path.join(newpath, dataset_file_name)
        if os.path.exists(path_name):
            print "File %s already exists, skipping it." % path_name
            continue

        overall_best_score = 0.0
        dataset = {}  # defaultdict() with no factory is just a plain dict
        dataset['classifier'] = None
        dataset['training_score'] = overall_best_score

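        # get_all_train_test_features returns (features, labels, object_ids);
        # the scaler fitted on the training features is saved for test time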
        dynamic_train = utilities.get_all_train_test_features(adjective, dynamic_features) 
        y_labels = dynamic_train[1]
        object_ids = dynamic_train[2]
        dynamic_scaler = preprocessing.StandardScaler().fit(dynamic_train[0])
        dynamic_X = dynamic_scaler.transform(dynamic_train[0])
        dynamic_kernel = linear_kernel(dynamic_X, -2)
        #dynamic_kernel = standardize(dynamic_kernel)

        static_train = utilities.get_all_train_test_features(adjective, static_features) 
        static_scaler = preprocessing.StandardScaler().fit(static_train[0])
        static_X = static_scaler.transform(static_train[0])
        static_kernel = linear_kernel(static_X, -2)
        #static_kernel = standardize(static_kernel)

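        # Multiple-kernel learning: sweep the convex blend weight alpha from
        # pure dynamic (alpha=0) to pure static (alpha=1) and keep the
        # classifier with the best grid-search F1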
        alpha_range = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        for alpha in alpha_range:
            print "Beginning %s, alpha %1.1f at %s" % (adjective, alpha, time.asctime())
            combined_kernel = alpha * static_kernel + (1 - alpha) * dynamic_kernel
            trained_clf, best_score = gram_grid_search(combined_kernel,
                                                       labels=y_labels,
                                                       object_ids=object_ids,
                                                       refined_range=adjective)
            print "F1: %1.5f at %s" % (best_score, time.asctime())
            if best_score > overall_best_score:
                overall_best_score = best_score
                dataset['classifier'] = trained_clf
                dataset['training_score'] = best_score
                dataset['alpha'] = alpha

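        # Persist everything needed to rebuild the kernels at test time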
        dataset['dynamic_features'] = dynamic_features[adjective]
        dataset['static_features'] = static_features[adjective]
        dataset['adjective'] = adjective
        dataset['dynamic_scaler'] = dynamic_scaler
        dataset['dynamic_train_scaled'] = dynamic_X
        dataset['dynamic_kernel_mean'] = np.mean(dynamic_kernel)
        dataset['dynamic_kernel_std'] = np.std(dynamic_kernel)
        dataset['static_scaler'] = static_scaler
        dataset['static_train_scaled'] = static_X
        dataset['static_kernel_mean'] = np.mean(static_kernel)
        dataset['static_kernel_std'] = np.std(static_kernel)
   
        # Save the results in the folder; binary mode is required because
        # HIGHEST_PROTOCOL produces a binary pickle
        with open(path_name, "wb") as f:
            print "Saving file: ", path_name
            cPickle.dump(dataset, f, protocol=cPickle.HIGHEST_PROTOCOL)
    def compute_probability_vector(self, bolt_obj):
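        """Collect static and dynamic (HMM) features for each motion of a
        run; once all four scored motions have arrived, classify every
        adjective with its merged static/dynamic kernel and publish the
        positive ones."""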

        # First object - initialize variables to store
        # Also clears out the vectors for new run
        if bolt_obj.state == bolt_obj.TAP:
            # Store results as they come in
            self.adjective_vectors_static = dict()
            self.adjective_vectors_dynamic = dict()
            self.all_motion_results = dict()
            self.mkl_results = dict()

        # Store dictionary of strings
        self.state_string = {
            bolt_obj.DISABLED: 'disabled',
            bolt_obj.THERMAL_HOLD: 'thermal_hold',
            bolt_obj.SLIDE: 'slide',
            bolt_obj.SQUEEZE: 'squeeze',
            bolt_obj.TAP: 'tap',
            bolt_obj.DONE: 'done',
            bolt_obj.SLIDE_FAST: 'slide_fast',
            bolt_obj.CENTER_GRIPPER: 'center_gripper'
        }

        # store dictionary for detailed states
        self.detailed_states = {
            bolt_obj.DISABLED: 'MOVE_ARM_START_POSITION',
            bolt_obj.SQUEEZE: 'SQUEEZE_SET_PRESSURE_SLOW',
            bolt_obj.THERMAL_HOLD: 'HOLD_FOR_10_SECONDS',
            bolt_obj.SLIDE: 'SLIDE_5CM',
            bolt_obj.SLIDE_FAST: 'MOVE_DOWN_5CM'
        }

        # Get the current motion
        current_motion = self.state_string[bolt_obj.state]

        # Check if state passed in should be processed
        if bolt_obj.state not in self.detailed_states:
            return
        else:
            # Get detailed state if exists
            current_detailed_state = self.detailed_states[bolt_obj.state]

            # Check if the state is the disabled state
            if bolt_obj.state == bolt_obj.DISABLED:
                self.norm_bolt_obj = upenn_features.pull_detailed_state(
                    bolt_obj, current_detailed_state)
                return
            else:
                self.bolt_object = upenn_features.pull_detailed_state(
                    bolt_obj, current_detailed_state)

            # Checks to make sure that the norm obj has been created
            if not self.norm_bolt_obj:
                print "Warning: there is no normalization data"

            # Build the static features
            static_feature_object, self.static_features_array = upenn_features.extract_static_features(
                self.bolt_object, self.norm_bolt_obj)
            static_feats = upenn_features.createFeatureVector(
                static_feature_object)

            # Store all feature vectors into one large vector array
            # Technically we don't need to store the static
            # features by adjective - but it's cleaner this way
            for classifier in self.all_classifiers:

                adj = classifier['adjective']

                # the hmm chain dictionary takes "<adjective>.pkl" as its
                # key, so append the extension to the name
                hmm_adj_name = '.'.join((adj, 'pkl'))

                # Initialize the per-adjective lists on first sight
                if adj not in self.adjective_vectors_static:
                    self.adjective_vectors_static[adj] = list()
                    self.adjective_vectors_dynamic[adj] = list()

                # Pull out chain associated with adjective
                # ordering of sensors - [pac, pdc, electrodes, tac]
                sensor_hmm = ['pac', 'pdc', 'electrodes', 'tac']
                hmm_features_phase = []
                for sensor in sensor_hmm:
                    hmm_chain = self.hmm_chains[hmm_adj_name].chains[
                        current_detailed_state][sensor]
                    hmm_data = self.bolt_object.hmm[sensor]

                    # fill in dynamic features: one HMM score per sensor
                    hmm_features_phase.append(hmm_chain.score(hmm_data))

                # Store the feature vectors away, keyed by adjective
                self.adjective_vectors_static[adj].append(static_feats)
                self.adjective_vectors_dynamic[adj].append(hmm_features_phase)

            # Check if all four motions have been performed; the per-adjective
            # vectors fill in lockstep, so checking the last one suffices.
            # If so - feed into classifier
            if len(self.adjective_vectors_dynamic[adj]) == 4:
                print 'All motions received! Computing adjective scores'
                for classifier in self.all_classifiers:

                    # Pull out which adjective we are working on
                    adj = classifier['adjective']

                    # Load training vectors for kernel creation
                    train_dynamic = utilities.get_all_train_test_features(
                        adj, self.training_dynamic, train=True)
                    train_static = utilities.get_all_train_test_features(
                        adj, self.training_static, train=True)

                    # Scale the training data
                    d_scaler = preprocessing.StandardScaler().fit(
                        train_dynamic[0])
                    s_scaler = preprocessing.StandardScaler().fit(
                        train_static[0])
                    train_dynamic_scaled = d_scaler.transform(train_dynamic[0])
                    train_static_scaled = s_scaler.transform(train_static[0])

                    # Pull out the feature vectors for static/dynamic
                    all_static_feats = np.hstack(
                        self.adjective_vectors_static[adj])
                    all_dynamic_feats = np.hstack(
                        self.adjective_vectors_dynamic[adj])

                    # Normalize the features using scaler
                    all_static_feats_scaled = classifier[
                        'static_scaler'].transform(all_static_feats)
                    all_dynamic_feats_scaled = classifier[
                        'dynamic_scaler'].transform(all_dynamic_feats)

                    # Create kernels for both static and dynamic
                    static_kernel = self.linear_kernel_test(
                        all_static_feats_scaled, train_static_scaled, 1)
                    dynamic_kernel = self.linear_kernel_test(
                        all_dynamic_feats_scaled, train_dynamic_scaled, 1)

                    # Merge the two kernels
                    alpha = classifier['alpha']
                    merged_kernel = alpha * static_kernel + (1 - alpha) * dynamic_kernel

                    # Predict adjective with computed kernel
                    clf = classifier['classifier']
                    self.mkl_results[adj] = clf.predict(merged_kernel)

                # Store off the adjectives that returned true
                adjectives_found = []
                for adj in self.mkl_results:
                    if self.mkl_results[adj] == 1:
                        adjectives_found.append(Adj(adj))

                publish_string = adjectives_found

                # Print and publish results!
                print "Results from MKL classification"
                #print self.mkl_results
                print str(adjectives_found)
                self.adjectives_pub.publish(publish_string)
def test_adjective(classifier, adjective_report ):
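    """Evaluate one trained MKL classifier on the held-out test set.

    Rebuilds the scaled training features, forms test-vs-train kernels,
    merges them with the stored alpha, and reports precision, recall, and F1.
    """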
    
    true_positives = 0.0
    true_negatives = 0.0
    false_positives = 0.0
    false_negatives = 0.0

    adjective = classifier['adjective']

    # Hard-coded feature locations; substitute dynamic_path/static_path
    # arguments to run this elsewhere
    dynamic_features = utilities.load_adjective_phase('/home/imcmahon/Desktop/mkl/dynamic/adjective_phase_set')
    static_features = utilities.load_adjective_phase('/home/imcmahon/Desktop/mkl/static/adjective_phase_set')

    # Rebuild the scaled training features; the test kernels below need the
    # scaled training rows as their second argument
    dynamic_train = utilities.get_all_train_test_features(adjective, dynamic_features, train=True)
    dynamic_train_scaler = preprocessing.StandardScaler().fit(dynamic_train[0])
    dynamic_train_scaled_X = dynamic_train_scaler.transform(dynamic_train[0])

    static_train = utilities.get_all_train_test_features(adjective, static_features, train=True)
    static_train_scaler = preprocessing.StandardScaler().fit(static_train[0])
    static_train_scaled_X = static_train_scaler.transform(static_train[0])

    # Test-vs-train kernels: each row holds one test sample's inner products
    # with every training sample
    dynamic_test = utilities.get_all_train_test_features(adjective, dynamic_features, train=False)
    dynamic_test_scaled_X = classifier['dynamic_scaler'].transform(dynamic_test[0])
    dynamic_kernel = linear_kernel_test(dynamic_test_scaled_X, dynamic_train_scaled_X, -2)
    #dynamic_kernel = (dynamic_kernel - classifier['dynamic_kernel_mean']) / classifier['dynamic_kernel_std']

    static_test = utilities.get_all_train_test_features(adjective, static_features, train=False)
    static_test_scaled_X = classifier['static_scaler'].transform(static_test[0])
    static_kernel = linear_kernel_test(static_test_scaled_X, static_train_scaled_X, -2)
    #static_kernel = (static_kernel - classifier['static_kernel_mean']) / classifier['static_kernel_std']

    # Merge the kernels with the alpha learned during training
    alpha = classifier['alpha']
    test_X = alpha * static_kernel + (1 - alpha) * dynamic_kernel
    print '\n\nTesting Adjective: %s' % classifier['adjective']

    # Pull out test labels and object ids
    test_Y = dynamic_test[1]
    object_ids = dynamic_test[2]

    clf = classifier['classifier']
    c_dict[classifier['adjective']] = clf.C  # c_dict: module-level {adjective: C}
    print clf

    # Predict the labels!
    output = clf.predict(test_X)
    # Determine if the true label and classifier prediction match
    for val in xrange(len(test_Y)):
        true_label = test_Y[val]
        predict_label = output[val]

        if true_label == 1:
            if predict_label == 1:
                true_positives += 1.0
                #true_positive_list.append(object_names[val])
            else:
                false_negatives += 1.0
                #false_negative_list.append(object_names[val])
        else: # label is 0
            if predict_label == 1:
                false_positives += 1.0
                #false_positive_list.append(object_names[val])
            else:
                true_negatives += 1.0
                #true_negative_list.append(object_names[val])
    
    # Compute statistics for the adjective
    try: 
        precision = true_positives / (true_positives + false_positives)
        recall = true_positives / (true_positives + false_negatives)
    
    except ZeroDivisionError: # The case when none are found
        precision = 0
        recall = 0
    try:
        f1 = 2.0 * precision*recall / (precision + recall)
    except ZeroDivisionError:
        f1 = 0
    print "Precision: %f, Recall: %f, F1: %f \n" % (precision, recall, f1)
    print "Alpha = %1.1f" % alpha
    adjective_report.write("%s, %1.1f, %f, %f, %f\n" % (classifier['adjective'], alpha, precision, recall, f1))

    print "%d False Positive Objects\n" % false_positives
    print "%d False Negative Objects\n" % false_negatives
    print "%d True Positive Objects\n" % true_positives
    print "%d True Negative Objects\n" % true_negatives
    
    return (alpha, precision, recall, f1)
    def compute_probability_vector(self, bolt_obj):
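        """Batch variant of compute_probability_vector: buffer the incoming
        bolt objects until the whole run has arrived, then extract features
        for every buffered motion, classify each adjective with its merged
        kernel, and publish the (currently hard-coded) adjective list."""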

        if len(self.bolt_object_list) == 0:
            self.movement_time = time.time()
        # First object - initialize variables to store
        # Also clears out the vectors for new run
        if bolt_obj.state == bolt_obj.TAP:
            # Store results as they come in
            self.adjective_vectors_static = dict() 
            self.adjective_vectors_dynamic = dict() 
            self.all_motion_results = dict()
            self.mkl_results = dict()
        
        # Store dictionary of strings
        self.state_string = {bolt_obj.DISABLED:'disabled',
                    bolt_obj.THERMAL_HOLD:'thermal_hold',
                    bolt_obj.SLIDE:'slide',
                    bolt_obj.SQUEEZE:'squeeze',
                    bolt_obj.TAP:'tap',
                    bolt_obj.DONE:'done',
                    bolt_obj.SLIDE_FAST:'slide_fast',
                    bolt_obj.CENTER_GRIPPER:'center_gripper'
                    }   
        
        # store dictionary for detailed states
        self.detailed_states = {bolt_obj.DISABLED:'MOVE_ARM_START_POSITION',
                                bolt_obj.SQUEEZE:'SQUEEZE_SET_PRESSURE_SLOW',
                                bolt_obj.THERMAL_HOLD:'HOLD_FOR_10_SECONDS',
                                bolt_obj.SLIDE:'SLIDE_5CM',
                                bolt_obj.SLIDE_FAST:'MOVE_DOWN_5CM'
                                }

        # Buffer incoming objects until the full run (six states) is in,
        # then process the whole batch at once
        if len(self.bolt_object_list) < 5:
            print len(self.bolt_object_list)
            self.bolt_object_list.append(bolt_obj)
            return
        else:
            self.bolt_object_list.append(bolt_obj)
            motion_end_time = time.time()
            print "Elapsed time was %g seconds" % (motion_end_time - self.movement_time)
            start_time = time.time()

            for bolt_obj_v in self.bolt_object_list:

                # Get the current motion 
                current_motion = self.state_string[bolt_obj_v.state] 
            
                # Check if state passed in should be processed
                if bolt_obj_v.state not in self.detailed_states:
                    print "Skipping state: %s" % current_motion 
                    continue 
                else:
                    # Get detailed state if exists
                    current_detailed_state = self.detailed_states[bolt_obj_v.state] 

                    # Check if the state is the disabled state
                    if bolt_obj_v.state == bolt_obj_v.DISABLED:
                        self.norm_bolt_obj = upenn_features.pull_detailed_state(bolt_obj_v,current_detailed_state)
                        continue 
                    else:
                        cur_bolt_object = upenn_features.pull_detailed_state(bolt_obj_v, current_detailed_state)

                    # Checks to make sure that the norm obj has been created
                    if not self.norm_bolt_obj:
                        print "Warning: there is no normalization data"

                    #Start storing the bolt objects

                    # Build the static features 
                    static_feature_object, self.static_features_array = upenn_features.extract_static_features(cur_bolt_object, self.norm_bolt_obj)
                    static_feats = upenn_features.createFeatureVector(static_feature_object)
               
                    # Store all feature vectors into one large vector array
                    # Technically we don't need to store the static 
                    # features by adjective - but it's cleaner this way
                    for classifier in self.all_classifiers:

                        adj = classifier['adjective']

                        # the hmm chain dictionary takes "<adjective>.pkl" as
                        # its key, so append the extension to the name
                        hmm_adj_name = '.'.join((adj, 'pkl'))
                    
                        # Initialize the per-adjective lists on first sight
                        if adj not in self.adjective_vectors_static:
                            self.adjective_vectors_static[adj] = list()
                            self.adjective_vectors_dynamic[adj] = list()
              
                        # Pull out chain associated with adjective
                        # ordering of sensors - [pac, pdc, electrodes, tac]
                        sensor_hmm = ['pac', 'pdc', 'electrodes', 'tac']
                        hmm_features_phase = []
                        for sensor in sensor_hmm:
                            hmm_chain = self.hmm_chains[hmm_adj_name].chains[current_detailed_state][sensor]
                            hmm_data = cur_bolt_object.hmm[sensor]

                            # fill in dynamic features: one HMM score per sensor
                            hmm_features_phase.append(hmm_chain.score(hmm_data))
            
                        # Store the feature vectors away, keyed by adjective
                        self.adjective_vectors_static[adj].append(static_feats)
                        self.adjective_vectors_dynamic[adj].append(hmm_features_phase)

            # Check if all four motions have been performed; the per-adjective
            # vectors fill in lockstep, so checking the last one suffices.
            # If so - feed into classifier
            if len(self.adjective_vectors_dynamic[adj]) == 4:
                print 'All motions received! Computing adjective scores'
                for classifier in self.all_classifiers:
          
                    # Pull out which adjective we are working on        
                    adj = classifier['adjective'] 
            
                    # Load training vectors for kernel creation
                    train_dynamic = utilities.get_all_train_test_features(adj, self.training_dynamic, train=True)
                    train_static = utilities.get_all_train_test_features(adj, self.training_static, train=True)
                    
                    # Scale the training data
                    d_scaler = preprocessing.StandardScaler().fit(train_dynamic[0])
                    s_scaler = preprocessing.StandardScaler().fit(train_static[0])
                    train_dynamic_scaled = d_scaler.transform(train_dynamic[0])
                    train_static_scaled = s_scaler.transform(train_static[0]) 
 
                    # Pull out the feature vectors for static/dynamic
                    all_static_feats = np.hstack(self.adjective_vectors_static[adj])
                    all_dynamic_feats = np.hstack(self.adjective_vectors_dynamic[adj])
               
                    # Normalize the features using scaler
                    all_static_feats_scaled = classifier['static_scaler'].transform(all_static_feats)
                    all_dynamic_feats_scaled = classifier['dynamic_scaler'].transform(all_dynamic_feats)

                    # Create kernels for both static and dynamic
                    static_kernel = self.linear_kernel_test(all_static_feats_scaled, train_static_scaled, 1)
                    dynamic_kernel = self.linear_kernel_test(all_dynamic_feats_scaled, train_dynamic_scaled, 1)

                    # Merge the two kernels
                    alpha = classifier['alpha']
                    merged_kernel = alpha * static_kernel + (1 - alpha) * dynamic_kernel

                    # Predict adjective with computed kernel
                    clf = classifier['classifier']
                    self.mkl_results[adj] = clf.predict(merged_kernel)

                # Store off the adjectives that returned true
                adjectives_found = []
                for adj in self.mkl_results:
                    if self.mkl_results[adj] == 1:
                        adjectives_found.append(Adj(adj))

                # Demo override: publish a fixed adjective set instead of
                # the MKL predictions
                hardcode_adj = [Adj('hard'), Adj('metallic'), Adj('cool')]
                #publish_string = adjectives_found
                publish_string = hardcode_adj
                # Print and publish results!
                print "Results from MKL classification"
                #print self.mkl_results
                print str(adjectives_found) 
                end_time = time.time()
                print "Elapsed time was %g seconds" % (end_time - start_time)
                self.adjectives_pub.publish(publish_string)