def train_peak_memory(cycles,hyperscan=True):

    '''Peak Memory Profiler

    Performs a peak memory test in a loop for the
    train() function.

    OPTIONS: hyperscan mode is 'True' (default) or 'False'

    '''
    
    temp = data('parties_and_employment')

    for i in range(cycles):
        if hyperscan is True:
            %memit train([1,7],'PERUSS', temp, epoch=50, hyperscan=True)
        else:
            %memit train([1,7],'PERUSS', temp, epoch=50, hyperscan=False)
# ---- Example 2 ----
# One boolean indicator column per cabin deck letter. Passing
# na=False makes startswith() yield False (instead of NaN) for
# missing cabins, which matches what the original `== True`
# comparison produced.
for _deck in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'T']:
    test_df['Cabin_' + _deck] = test_df.Cabin.str.startswith(_deck, na=False)

# Flag rows with no cabin recorded. The original spelled this as
# `startswith('NaN') != False`, which evaluates True exactly where
# Cabin is missing (startswith returns NaN there, and NaN != False
# is True); isna() states that intent directly.
test_df['Cabin_NaN'] = test_df.Cabin.isna()

# Drop the raw columns that are now encoded (Cabin) or unused.
train_df = train_df.drop(['Ticket', 'Cabin', 'Embarked', 'Sex'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin', 'Embarked', 'Sex'], axis=1)

# Discard any rows that still contain missing values.
train_df = train_df.dropna()
test_df = test_df.dropna()

## 3. Training the model with Autonomio

# Fit the Titanic survival model: inputs are columns 2 and 4-21,
# the target is 'Survived', and the fitted model is saved under
# the name 'titanic'.
_titanic_x = [2] + list(range(4, 22))
_titanic_params = {
    'dims': 8,
    'flatten': 'none',
    'epoch': 250,
    'dropout': 0,
    'batch_size': 12,
    'loss': 'logcosh',
    'activation': 'elu',
    'layers': 6,
    'shape': 'brick',
    'save_model': 'titanic',
}
train(_titanic_x, 'Survived', train_df, **_titanic_params)
# ---- Example 3 ----
# Load the test CSV two ways: first explicitly as a raw file with
# no header row, then relying on data()'s defaults.
data('test_data.csv', 'file', header=None)
data('test_data.csv')

# Example wrangler() invocations; each rebinds temp1, so only the
# last result survives. NOTE(review): `temp` is presumably loaded
# in a part of the file not visible here — confirm upstream.
temp1 = wrangler(df=temp, y='neg', vectorize='text')
temp1 = wrangler(df=temp, max_categories='max', to_string='text',
                 first_fill_cols='url', starts_with_col='location')
temp1 = wrangler(df=temp, max_categories=42,
                 vectorize=['text', 'user_tweets'])

# transform_data() usage: default, X-only, Y-only, and combined
# X/Y extraction modes. X and Y are rebound on each call.
X = transform_data(temp, flatten='none')
X = transform_data(temp, flatten='none', X=1)
Y = transform_data(temp, flatten='none', Y='neg')
X, Y = transform_data(temp, flatten='none', X=1, Y='neg')

# x variable input modes
# Single column index, a column list, and a multi-column list,
# combined with different regression modes and flatten settings.
# Each call rebinds tr, so only the last result is kept.
tr = train(1, 'neg', temp, model='regression', flatten='mode')
tr = train([1, 5], 'neg', temp, model='regression',
           reg_mode='logistic', flatten='cat_string')
tr = train([1, 2, 3, 4, 5], 'neg', temp,
           model='regression',
           reg_mode='regularized',
           flatten='cat_numeric')

# y variable flattening mode
# flatten accepts named strategies ('median', 'mean'), an int
# threshold, and a float — presumably a quantile cutoff; confirm
# against train()'s documentation.
tr = train(1, 'quality_score', temp, flatten='median')
tr = train(1, 'quality_score', temp, flatten=6)
tr = train(1, 'quality_score', temp, flatten=.5)
tr = train(1, 'quality_score', temp, flatten='mean')

# model saving and loading
# The x argument here is a column name ('text') rather than an
# index; the fitted model is saved under 'test_model'.
tr = train('text', 'neg', temp, save_model='test_model')
        # NOTE(review): this fragment begins mid-structure — the
        # enclosing for-loop over rows of data2 and the matching
        # `if` branch are outside the visible chunk.
        e = 0
        e = str(e)
        data2[i, 8] = e
    else:
        # Replace column 8 with the 1-based index (as a string) of
        # the first marker b[j] found inside the cell's value.
        # Presumably b holds cabin deck letters — TODO confirm
        # where b and d are defined upstream.
        for j in range(d):
            if b[j] in data2[i, 8]:
                e = j + 1
                e = str(e)
                data2[i, 8] = e
                
# Write the encoded cabin codes (column 8 of the numpy-style
# arrays built above) back into the DataFrames, then drop rows
# with missing values.
df.Cabin = data[:,8]
df = df.dropna()

df2.Cabin = data2[:,8]
df2 = df2.dropna()

# Train on columns 1-8 with 'Survived' as the target; the fitted
# model is saved under the name 'titanic'.
p = train([1,2,3,4,5,6,7,8,],'Survived',df,
                                dims=8,
                                flatten='none',
                                epoch=150,
                                dropout=0,
                                batch_size=12,
                                loss='logcosh',
                                activation='elu',
                                layers=6,
                                shape='brick',
                                save_model='titanic'
                                )

# Score the held-out frame with the saved 'titanic' model, using
# the 'Name' column as row labels.
te = test(df2, 'titanic', labels='Name')
# ---- Example 5 ----
# Pull hyperparameters out of the parameter mapping `j` (defined
# in a part of the file not visible here); values arrive as
# strings and are coerced to their working types.
neuron_last = int(j['neuron_last'])
# strip() removes stray quote characters that evidently survive
# serialization of the shape value — TODO confirm source format.
shape = str(j['shape']).strip("'")
loss = str(j['loss'])
optimizer = str(j['optimizer'])
activation = str(j['activation'])
activation_out = str(j['activation_out'])

# Load the working dataset produced earlier in the pipeline.
# NOTE(review): rebinding `data` shadows any data() function used
# elsewhere in this file — verify this is intentional.
data = pd.read_csv('data_temp/data.csv')

# Train with the extracted hyperparameters. x, y, flatten, epoch,
# dims, batch_size, layers, and neuron_first are presumably set
# upstream — not visible in this chunk.
tr = train(x,
           y,
           data,
           flatten=flatten,
           epoch=epoch,
           dims=dims,
           batch_size=batch_size,
           layers=layers,
           neuron_first=neuron_first,
           neuron_last=neuron_last,
           loss=loss,
           optimizer=optimizer,
           activation=activation,
           activation_out=activation_out)

# Final-epoch metrics from the history object at tr[1].
# NOTE(review): 'acc'/'val_acc' keys follow the pre-2.3 Keras
# naming convention — confirm against the installed backend.
train_acc = tr[1].history['acc'][-1]
test_acc = tr[1].history['val_acc'][-1]
train_loss = tr[1].history['loss'][-1]
test_loss = tr[1].history['val_loss'][-1]

# Round for reporting; the matching test_loss rounding falls
# after this chunk.
train_acc = round(train_acc, 3)
test_acc = round(test_acc, 3)
train_loss = round(train_loss, 3)