def cascade():
    """
    Prototype benchmark for cascade classification. Still being developed.

    Loads a trained AdaBoost model and a test image, builds a batch of N
    identical feature vectors, then compares the average wall-clock cost of
    an early-exit ("soft cascade") evaluation against a plain
    ``bdt.predict`` call. Timings are printed to stdout.

    :return: None (results are printed, not returned).
    """
    bdt = joblib.load('models/adaboost256.pkl')
    frame = cv2.imread('images/gradient.jpg')
    # Channel features for a canonical 64x128 detection window,
    # flattened to a single feature vector.
    x = cf.compute_chans(cv2.resize(frame, (64, 128))).ravel()
    N = 100
    # Batch of N identical rows so both code paths see the same input.
    X = np.array([x for i in range(N)])

    # --- timed: early-exit (soft cascade) evaluation --------------------
    start = time.time()
    for i in range(N):
        _cascade_reject(bdt, X)
    print((time.time() - start) / N)

    # --- timed: full AdaBoost predict ------------------------------------
    start = time.time()
    for i in range(N):
        out = bdt.predict(X)
    print((time.time() - start) / N)

    print(type(bdt))


def _cascade_reject(bdt, X, min_stage=8, reject_thresh=-.2):
    """Early-exit evaluation of the boosted ensemble (soft cascade).

    Accumulates per-stage SAMME.R scores and rejects as soon as the running
    mean score for the positive class drops below ``reject_thresh``, once at
    least ``min_stage`` stages have been evaluated.

    :param bdt: fitted AdaBoost ensemble exposing ``estimators_``.
    :param X: 2-D feature matrix; only row 0's score drives the early exit.
    :param min_stage: minimum number of stages before rejection is allowed.
    :param reject_thresh: running-mean score below which we reject.
    :return: False on early rejection, and also after all stages (the
        accept path is not implemented yet -- work in progress).
    """
    # Scalar start: adding the first (n_samples, 2) stage-score array
    # broadcasts pred to that shape.  (The original list initialisation
    # crashed on the tuple index below.)
    pred = 0.0
    for t, estimator in enumerate(bdt.estimators_):
        pred = pred + _samme_proba(estimator, 2, X)
        # Running mean of the positive-class score for the first sample.
        p_t = pred[0, 1] / (t + 1)
        print('p_t is: ', p_t)

        if p_t < reject_thresh and t > min_stage:
            return False

    return False
Esempio n. 2
0
def get_aggressive_tweets(tweets):
    """Return (id, date) pairs for tweets the classifier labels aggressive.

    :param tweets: sequence of (id, content, date) triples.
    :return: list of (id, date) tuples whose cascade prediction equals 1.
    """
    # The classifier reads its input from disk, so round-trip the tweets
    # through a temporary CSV file.
    tmp_path = '/proj/nlp/users/terra/streaming_results/correlate_tmp.csv'
    write_out_data(tweets, tmp_path)
    predictions = cascade(tmp_path)

    # Keep the (id, date) pair wherever the prediction is positive.
    id_date = [(t[0], t[2]) for t in tweets]
    return [pair for pair, label in zip(id_date, predictions) if label == 1]
Esempio n. 3
0
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 16 21:49:56 2014

@author: haggertr
"""
    
from matrix_from_xls import matrix_from_xls as mfx
import sys
sys.path.append('c:\\code\\cascade\\')  # location of module
import cascade as csc


# Alternate input files kept for reference:
#file = 'C:\\Users\\haggertr\\Desktop\\Documents\\work - OSU\\research\\WW2100\\Research\\results2\\Cascade_plots\\save tmp\\testdata.xls'
#file = 'C:\\code\\matrix-from-data\\testdata.xls'
file = 'C:\\code\\matrix-from-data\\WS1_CO2.xls'
#file = '1e__IjqMLBBVLqBHWZiIORkPbz6W8PQlSPVeodQBM8Oc'  #google key. Add kwarg filetype='gsheet'

# Load the data matrix; positional args presumably select sheet/rows/cols
# of the workbook -- TODO confirm against matrix_from_xls.
a = mfx(file, 1, 48, 16)

b = csc.cascade(a)
# print() call is valid in both Python 2 and Python 3 for a single
# argument; the bare `print b` statement was Python-2-only syntax.
print(b)

Esempio n. 4
0
File: svm.py Progetto: rikel/cvm
 def train(self, labeledPoints):
     """Distill the input via the cascade, then fit a fresh model on it.

     The labeled points are reduced (to at most ``self.nmax``) by the
     cascade before training; the fitted model is stored on ``self.model``.
     """
     reduced = cascade(labeledPoints, self._reduce, self.nmax)
     features, labels = self._readiterator(reduced)
     # Model is bound to self before fitting, matching the original flow.
     self.model = self.create_model()
     self.model.fit(features, labels)
Esempio n. 5
0
File: svm.py Progetto: rikel/cvm
 def train(self, labeledPoints):
     """Cascade-reduce the labeled points, then fit the model on the result."""
     # Reduce the (potentially large) input down to at most self.nmax points.
     labeledPoints = cascade(labeledPoints, self._reduce, self.nmax)
     X, y = self._readiterator(labeledPoints)
     self.model = self.create_model()
     self.model.fit(X, y)
 def cascadeCallback(self, sender):
     """Callback that triggers a cascade run; `sender` is not used."""
     cascade()
Esempio n. 7
0
# Build a directed graph from the edge-list dataframe (columns 'from', 'to').
G = nx.from_pandas_edgelist(df, 'from', 'to', create_using=nx.DiGraph())

#%% Launch Cascade Model

from cascade import cascade
import random

# Work on a small 100-row slice of the raw edge list for this experiment.
df = dfRaw[94000:94100]

# Assign Random Seed Vertices: pick n distinct source nodes at random.
n = 2
seeds = random.sample(list(set(df['from'])), n)
#seeds = init_seeds;

# Run Cascade Model
df, df_inf, steps = cascade(seeds, df)

#%%

# Rename 'edgelist' to dataframe
#df = edgelist;

# Handle edgelist unweighted networks with uniform weights
# Assign likelihood of adoption for all vertices
# NOTE(review): the lambda ignores the existing value -- every row gets an
# independent uniform(0, 1) willingness.
df['willingness'] = df['willingness'].apply(lambda x: random.random())
df['influenced'] = np.zeros((len(df), 1))

# Assign "influenced" values of 10 to seeded nodes
# NOTE(review): this overwrites the zeros assigned just above, making the
# previous line effectively redundant.
df['influenced'] = np.where(df['from'].isin(seeds), 10, 0)
steps = 0
Esempio n. 8
0
File: kreg.py Progetto: rikel/cvm
    def train(self, labeledPoints):
        """Fit the final model on points distilled by the cascade reduction.

        Stores the training features on ``self.X`` and the fitted model and
        its support on ``self.model`` / ``self.support``.
        """
        reduced = cascade(labeledPoints, self._reduce, self.nmax)
        self.X, labels = self._readiterator(reduced)

        # The final model does not need regularization, so loosen C.
        self.model, self.support = self._fit(self.X, labels, newC=self.C * 10.0)
Esempio n. 9
0
 def cascadeCallback(self, sender):
     """Callback that triggers a cascade run; `sender` is not used."""
     cascade()
Esempio n. 10
0
# Compare seed-selection strategies: for each method, time the selection of
# a k-node seed set A, run the cascade from it, and log the results.
for k in [5]:

    for method in ['greedy', 'random', 'out-degree', 'closeness centrality']:

        print(str(k))

        # Time only the seed-selection step (the methods are mutually
        # exclusive, so an if/elif chain picks exactly one).
        start = time.time()
        if method == 'greedy':
            A = greedy(k, p, 50, G, nodes, edges)
        elif method == 'random':
            A = set(random.sample(nodes, k))
        elif method == 'closeness centrality':
            A = centrality(G, k)
        elif method == 'out-degree':
            A = out_degree(G, k)
        end = time.time()

        #test results#
        nodes_reached, GX = cascade(G, A, p)

        out_file.write('%s\t%s\t%f\t%d\t%d\n' %
                       (method, str(A), end - start, k, nodes_reached))

out_file.flush()
out_file.close()
Esempio n. 11
0
 def on_evolving_train_config_activate(self, radiobutton, data=None):
     """For showing window for configuring parameters related to cascade
     Training.

     Signal handler; `radiobutton` and `data` are not used in the body.
     """
     # NOTE(review): the window is bound to a local and never referenced
     # afterwards -- presumably cascade() displays itself on construction.
     wcascade = cascade()
Esempio n. 12
0
    def train(self, labeledPoints):
        """Train on cascade-reduced points and store the fitted model.

        The input is distilled (to at most `self.nmax` points) by the
        cascade; features are kept on `self.X`, and the fitted model and
        its support are stored on `self.model` / `self.support`.
        """
        labeledPoints = cascade(labeledPoints, self._reduce, self.nmax)
        self.X, y = self._readiterator(labeledPoints)

        # final model does not need regularization
        self.model, self.support = self._fit(self.X, y, newC=self.C * 10.0)
Esempio n. 13
0
 def on_evolving_train_config_activate(self, radiobutton, data=None):
     """Display the window for configuring cascade-training parameters.

     Signal handler; `radiobutton` and `data` are not used in the body.
     """
     wcascade = cascade()