Example 1
def test_suggested_cased_always_qualified_column_names(completer):
    text = 'SELECT  from users'
    position = len('SELECT ')
    cols = [column(c) for c in cased_users_col_names]
    result = result_set(completer, text, position)
    assert result == set(cased_funcs + cols + testdata.builtin_functions() +
                         testdata.keywords())
Example 2
def test_columns_before_keywords(completer):
    text = 'SELECT * FROM orders WHERE s'
    completions = get_result(completer, text)

    col = column('status', -1)
    kw = keyword('SELECT', -1)

    assert completions.index(col) < completions.index(kw)
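These tests lean on small helpers (`get_result`, `result_set`) that are not shown in the listing. A minimal sketch of what they might look like for a prompt_toolkit-based completer; the bodies below are an assumption, not the test suite's actual code:

from unittest.mock import Mock

from prompt_toolkit.document import Document


def get_result(completer, text, position=None):
    # Ask the completer for completions at the given cursor position,
    # defaulting to the end of the text.
    position = len(text) if position is None else position
    document = Document(text=text, cursor_position=position)
    return list(completer.get_completions(document, Mock()))


def result_set(completer, text, position=None):
    # Same as get_result, but as a set for order-independent assertions.
    return set(get_result(completer, text, position))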
Example 4
def test_suggested_cased_always_qualified_column_names(
        completer
):
    text = 'SELECT  from users'
    position = len('SELECT ')
    cols = [column(c) for c in cased_users_col_names]
    result = result_set(completer, text, position)
    assert result == set(cased_funcs + cols
                         + testdata.builtin_functions() + testdata.keywords())
Example 5
def predict(self, x_data):
    y_hat_models = []
    for model in self.models:
        y_hat_models.append(model.predict(x_data))  # collect a prediction from every model
    y_hat = []
    for i in range(len(x_data)):
        # Take the most frequent prediction for sample i; a tie is impossible
        # because the ensemble holds an odd number of models.
        lst = column(y_hat_models, i)
        y_hat.append(max(set(lst), key=lst.count))
    return y_hat
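The two-argument `column(rows, i)` used here (and in the later snippets) is not defined in any of the excerpts; judging from the calls, it presumably extracts the i-th entry of every row. A minimal sketch under that assumption:

def column(rows, i):
    # Return the i-th element of every row of a 2D sequence,
    # so column(matrix, 0) is the first column as a list.
    return [row[i] for row in rows]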
Example 6
def adaboost(train, test, headers, fullTestData):
    ylabels = ['H', 'A', 'D']  # make predictions for each of the possible labels
    results = []
    for y in ylabels:
        # print("Training for", y)
        rootNode = Node(train)  # initialize the first decision stump
        treeRootNode = buildTree(rootNode, y, headers)
        results.append(predict(test, rootNode.splits, y))

    print("Now making predictions")
    prediction = []
    for r in range(len(results[0])):
        temp = [zy for zy in column(results, r)]
        # Take the label with the largest alpha value as the final prediction
        prediction.append(ylabels[temp.index(max(temp))])

    print("Now checking predictions")
    corr = 0
    print("Home\tAway\tPrediction\tActual\tBookie")
    file = open("resultdata.csv", 'a')
    writer = csv.writer(file, quoting=csv.QUOTE_ALL)
    for p in range(len(prediction)):
        print('\a')  # terminal bell as progress feedback
        row = [column(fullTestData, -2)[p], column(fullTestData, -1)[p],
               prediction[p], column(test, -2)[p], column(fullTestData, -4)[p]]
        writer.writerow(row)
        print(row)
        if prediction[p] == column(test, -2)[p]:
            corr += 1

    file.close()
    try:
        print(corr * 100 / len(prediction), len(prediction))
    except ZeroDivisionError:
        print(0, len(prediction))
    print("done")
Example 7
def test_join_using_suggests_columns_after_first_column(completer, text):
    result = result_set(completer, text)
    cols = [column(c) for c in metadata['tables']['USERS']]
    cols += [column(c) for c in metadata['tables']['ORDERS']]
    assert result == set(cols)
Example 8
def test_suggest_columns_after_three_way_join(completer):
    text = '''SELECT * FROM users u1
              INNER JOIN users u2 ON u1.id = u2.id
              INNER JOIN users u3 ON u2.id = u3.'''
    result = result_set(completer, text)
    assert (column('id') in result)
Example 9
def test_no_column_qualification(text, completer):
    cols = [column(c) for c in cased_users_col_names]
    result = result_set(completer, text)
    assert result == set(cols)
Example 10
cased_func_names = [
    'Custom_Fun', '_custom_fun', 'Custom_Func1', 'custom_func2',
    'set_returning_func'
]
cased_tbls = ['Users', 'Orders']
cased_views = ['User_Emails', 'Functions']
casing = (['SELECT', 'PUBLIC'] + cased_func_names + cased_tbls + cased_views +
          cased_users_col_names + cased_users2_col_names)
# Lists for use in assertions
cased_funcs = [
    function(f)
    for f in ('Custom_Fun', '_custom_fun', 'Custom_Func1', 'custom_func2')
] + [function('set_returning_func')]
cased_tbls = [table(t) for t in (cased_tbls + ['"Users"', '"select"'])]
cased_rels = [view(t) for t in cased_views] + cased_funcs + cased_tbls
cased_users_cols = [column(c) for c in cased_users_col_names]
aliased_rels = [
    table(t) for t in ('users u', '"Users" U', 'orders o', '"select" s')
] + [view('user_emails ue'), view('functions f')] + [
    function(f) for f in ('_custom_fun() cf', 'custom_fun() cf',
                          'custom_func1() cf', 'custom_func2() cf')
] + [
    function('set_returning_func(x := , y := ) srf',
             display='set_returning_func(x, y) srf')
]
cased_aliased_rels = [
    table(t) for t in ('Users U', '"Users" U', 'Orders O', '"select" s')
] + [view('User_Emails UE'), view('Functions F')] + [
    function(f) for f in ('_custom_fun() cf', 'Custom_Fun() CF',
                          'Custom_Func1() CF', 'custom_func2() cf')
] + [
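The `column`, `table`, `view`, `function`, and `keyword` helpers that build these expected completions are defined elsewhere in the test suite. One plausible way such tagged completions could be constructed with prompt_toolkit is sketched below; this is an assumption about their shape, and the real suite may normalize completions differently before comparing:

from functools import partial

from prompt_toolkit.completion import Completion


def completion(display_meta, text, pos=0, display=None):
    # Build a Completion tagged with the kind of object it completes to.
    return Completion(text, start_position=pos, display=display or text,
                      display_meta=display_meta)


column = partial(completion, 'column')
table = partial(completion, 'table')
view = partial(completion, 'view')
function = partial(completion, 'function')
keyword = partial(completion, 'keyword')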
Example 11
    try:  # try to load serialized training data with the features already extracted
        traindata = pickle.load(open("traindata.p", "rb"))
        headers = pickle.load(open("headers.p", "rb"))
    except Exception:  # otherwise compute feature data for every match and serialize it
        traindata, teamwise, matchwise, headers = trainOnAll(data)
        pickle.dump(traindata, open("traindata.p", "wb"))
        pickle.dump(teamwise, open("teamwise.p", "wb"))
        pickle.dump(matchwise, open("matchwise.p", "wb"))
        pickle.dump(headers, open("headers.p", "wb"))

    # Normalize the features in the training data.
    # If a feature value is missing (None), replace it with the feature's normalized average.
    for x in range(len(traindata[0]) - 5):
        tmp = [t for t in column(traindata, x) if t is not None]
        if len(tmp) == 0:
            avg = mn = mx = 0.0  # no known values for this feature
        else:
            avg = sum(tmp) / len(tmp)
            mn = min(tmp)
            mx = max(tmp)
        # Normalize each value as (f - fmin) / (fmax - fmin)
        for num, y in enumerate(column(traindata, x)):
            if traindata[num][x] is None:
                try:
                    traindata[num][x] = (avg - mn) / (mx - mn)
                except ZeroDivisionError:
                    traindata[num][x] = 0.0
            else:
                try:
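The inner loop above is plain min-max scaling, (f - fmin) / (fmax - fmin). A standalone version of the same formula, where the function name is mine rather than the original project's:

def min_max_scale(values):
    # Scale a list of numbers into [0, 1] using (v - min) / (max - min);
    # if every value is identical the denominator is zero, so return zeros.
    lo, hi = min(values), max(values)
    if hi == lo:
        return [0.0 for _ in values]
    return [(v - lo) / (hi - lo) for v in values]


print(min_max_scale([2, 4, 6, 10]))  # [0.0, 0.25, 0.5, 1.0]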
Example 15
cased_func_names = [
    'Custom_Fun', '_custom_fun', 'Custom_Func1', 'custom_func2', 'set_returning_func'
]
cased_tbls = ['Users', 'Orders']
cased_views = ['User_Emails', 'Functions']
casing = (
    ['SELECT', 'PUBLIC'] + cased_func_names + cased_tbls + cased_views
    + cased_users_col_names + cased_users2_col_names
)
# Lists for use in assertions
cased_funcs = [
    function(f) for f in ('Custom_Fun', '_custom_fun', 'Custom_Func1', 'custom_func2')
] + [function('set_returning_func')]
cased_tbls = [table(t) for t in (cased_tbls + ['"Users"', '"select"'])]
cased_rels = [view(t) for t in cased_views] + cased_funcs + cased_tbls
cased_users_cols = [column(c) for c in cased_users_col_names]
aliased_rels = [
    table(t) for t in ('users u', '"Users" U', 'orders o', '"select" s')
] + [view('user_emails ue'), view('functions f')] + [
    function(f) for f in (
        '_custom_fun() cf', 'custom_fun() cf', 'custom_func1() cf',
        'custom_func2() cf'
    )
] + [function(
    'set_returning_func(x := , y := ) srf',
    display='set_returning_func(x, y) srf'
)]
cased_aliased_rels = [
    table(t) for t in ('Users U', '"Users" U', 'Orders O', '"select" s')
] + [view('User_Emails UE'), view('Functions F')] + [
    function(f) for f in (
app.layout = html.Div(
    children=[
        row([
            html.H2(
                'Visualizing common distributions',
                style={'textAlign': 'center', 'color': colors['text']},
            ),
        ]),

        row([
            column([
                row([
                    html.Br(),
                    html.Label('Set N:'),
                    dcc.Input(id='set_n', value=10, type='number', min=0, step=1, size=1),
                ]),
                row([
                    html.Label('Set p:'),
                    dcc.Input(id='set_p', value=0.5, type='number', min=0.0, max=1.0, step=0.05),
                ])
            ], className='two columns'),

            column([
                dcc.Graph(
                    id='binomial_graph',
                    config={
                        'displaylogo': False,  # don't show the Plotly logo
                        'modeBarButtonsToRemove': ['pan2d', 'lasso2d'],  # hide these tools in the Plotly mode bar
                    },
                ),
            ], className='eight columns'),
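In this Dash layout, `row` and `column` are not Dash APIs and are unrelated to the matrix-style `column(rows, i)` helper in the earlier examples; given the `className` values, they are presumably thin wrappers around `html.Div` for a Bootstrap-style grid. A sketch under that assumption:

from dash import html  # in older Dash versions: import dash_html_components as html


def row(children=None, **kwargs):
    # A full-width grid row wrapping its children in a Div.
    return html.Div(children=children or [], className='row', **kwargs)


def column(children=None, className='', **kwargs):
    # A grid column; its width comes from classes such as 'two columns'.
    return html.Div(children=children or [], className=className, **kwargs)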