Example #1
images = x_test[0:9]
true_cls = np.argmax(y_test[0:9], axis=1)
predicted_cls = np.argmax(prediction[0:9], axis=1)
plot_images(images, true_cls, predicted_cls)  # plot the nine selected images rather than the full test set

"""#FOR DOWNLOADING MODEL FOR TENSOFLOW JS"""

!pip3 install tensorflowjs 
!mkdir model
model.save('conv.h5')
!ls
!tensorflowjs_converter --input_format keras conv.h5 model/
!zip -r model.zip model 
!ls -l
from google.colab import files
files.download('model.zip')

"""#FOR SAVING KERAS MODEL TO GOOGLE DRIVE"""

!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
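
# 2. Upload the saved model file to Drive - a minimal sketch, assuming 'conv.h5'
#    was already written to the Colab working directory as in the section above.
model_file = drive.CreateFile({'title': 'conv.h5'})
model_file.SetContentFile('conv.h5')
model_file.Upload()
print('Uploaded conv.h5 with file ID {}'.format(model_file.get('id')))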
Example #2
# Look at the state dict
model.state_dict()

# Save the model
torch.save(model.state_dict(), 'mymodel.pt')

# Load the model
model2 = nn.Sequential(nn.Linear(D, 1), nn.Sigmoid())

model2.load_state_dict(torch.load('mymodel.pt'))

# Get accuracy
with torch.no_grad():
    p_train = model2(X_train)
    p_train = np.round(p_train.numpy())
    # true evaluates to 1
    # false evaluates to 0
    train_acc2 = np.mean(y_train.numpy() == p_train)

    p_test = model2(X_test)
    p_test = np.round(p_test.numpy())
    test_acc2 = np.mean(y_test.numpy() == p_test)

print(f"Train accuracy 2: {train_acc2:.4f}, Test accuracy 2: {test_acc2:.4f}")

# Download the model
from google.colab import files

files.download('mymodel.pt')
			s64_funcs.YOLO(image_name)
	else:
			s64_funcs.YOLO(image_name, subfolder)
else:
	for image in image_names:
		if subfolder == '':
			s64_funcs.YOLO(image)
		else:
			s64_funcs.YOLO(image, subfolder)

import os
os.path.isfile('/mlobjectdetectionprogramme')
os.path.isdir('/sample_data')

"""# Save Output

Run the cell below to download the files.
If you chose to put the output files in a subfolder, then the subfolder will be saved as .zip file.
If files were saved directly to the working directory, then they will be downloaded as individual files.
"""

# Download output files
if subfolder == '':
  output_fls = glob('Processed_*')
  output_fls.extend(glob('*_Output_data.txt'))
  for output_fl in output_fls:
    files.download('/mlobjectdetectionprogramme/'+output_fl)
else:
  shutil.make_archive(subfolder+'_dl', 'zip', '/content/'+subfolder)
  files.download('/mlobjectdetectionprogramme/'+subfolder+'_dl.zip')
print('Download starting...')
            (0* lgb_model_full_data.predict(z_test2)) + \
            (0 * stack_gen_model.predict(np.array(z_test2)))))

"""Predicted cap hit% for the 2019-2020 skater class. The term predicted by the term model was used as the model input for the cap hit% model."""

#@title

predictions = pd.DataFrame(blended_predictions(z_test2))
predictions = pd.concat([player_names, predictions], axis=1)

predictions

from google.colab import files

predictions.to_csv('Raw_Player_Projections.csv')
files.download('Raw_Player_Projections.csv')

"""# Visualizing Feature Importance"""

#@title
# Install SHAP
!pip install shap==0.27
import shap

#@title
# Visualise feature importances with SHAP (global)
explainer = shap.TreeExplainer(lightgbm)
shap_values = explainer.shap_values(X2)

"""The following graph shows the 20 most important features in the cap hit% model. 
Example #5
final_opt_model_rf.fit(X_train, y_train)

print("Random Forest Training RMSE: ",
      scoreRMSE(final_opt_model_rf, X_train, y_train))
print("Random Forest Validation RMSE: ",
      scoreRMSE(final_opt_model_rf, X_val, y_val))
"""Perfect, we can now use this model to do the predictions. We could have gone beyond and searched for higher paramenters, but it seems a bit unnecessary, especially considering that the optimal parameters are in the middle of our range of parameter values. Seeing as this is the best we have achieved, we will use this model for our predictions."""

# make predictions using optimized Random Forest model fitted above
predictions_rf = final_opt_model_rf.predict(X_test)
"""We finally convert our predictions to a dataframe with the right submission format and inspect to make sure it is correct."""

# format predictions to be compatible with Kaggle upload
sample_submission = pd.DataFrame(data=predictions_rf, columns=['Predicted'])
sample_submission.insert(0, "Id", range(1, 1 + X_test.shape[0]))
sample_submission['Id'] = sample_submission['Id'].astype(str)
sample_submission.head()
"""We can finally convert to CSV and submit to Kaggle."""

# save predictions to .csv file for upload to Kaggle
sample_submission.to_csv("sample_submission.csv", index=False)
files.download('sample_submission.csv')
"""This is a huge improvement! This is very positive results and the best we can do with the current grid search method. We could have gone beyond and searched for higher paramenters, but it seems a bit unnecessary, especially considering that the optimal parameters are in the middle of our range of parameter values. Seeing as this is the best we have achieved, we will use this model for our predictions."""

"""These results make total sense. A random forest is a complex model using ensembling of decision trees, which enables it to fit a very large multi-features model. Still, it is simple enough to be well optimized by grid searching, as we have shown. It is therefore a very good model for the task we are tasked of doing here, and can be seen by the performances.

At the writing of this file (Thursday, February 14, 22:00), this gives us a ranking of position 13 on the Kaggle competition.
"""

datos_procesado.shape


# In[ ]:


datos_procesado.ID_BAJA.value_counts()


# In[ ]:


datos_procesado[columnasModelo3 + ['ID_BAJA']].to_csv("data_modelo.csv", index=False)
files.download("data_modelo.csv")


# **VARIABLE SELECTION**

# In[ ]:


datos_X = datos_procesado[columnasModelo3]
datos_y = datos_procesado["ID_BAJA"]


# In[ ]:


datos_X.shape
pred_val = lin_reg_1.predict(poly_reg.fit_transform(x_val))

print(r2_score(y_val, pred_val))

ids = test['ID']
test.drop(['ID','Username'], axis=1,inplace =True)

# NOTE: ideally the LabelEncoder fitted on the training data should be reused here
labelencoder_X = LabelEncoder()
test['Tag'] = labelencoder_X.fit_transform(test['Tag'])

from sklearn.preprocessing import Binarizer
bn = Binarizer(threshold=7)
pd_watched = bn.transform([test['Answers']])[0]
test['pd_watched'] = pd_watched

test = sc_X.transform(test)  # reuse the scaler fitted on the training data instead of refitting on the test set

pred_test = lin_reg_1.predict(poly_reg.fit_transform(test))
pred_test=abs(pred_test)

submission = pd.DataFrame({'ID': ids,
                           'Upvotes':pred_test
                           })

submission.to_csv("linearregr.csv",index=False)

from google.colab import files
files.download('linearregr.csv')
Example #8
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])  # optimizer - the training procedure, loss - the function we optimize
# metrics - the quantity we track during training

print(model.summary())  # admire the model and see how many parameters (weights) it has

model.fit(x_train, y_train, batch_size=200, epochs=25, validation_split=0.2, verbose=1)  # train the model

"""<b>Saving the model to your computer</b>"""

model.save("MNIST_Sequential_dense.h5")  # save the model to the Colab server

!ls  # from the Linux shell



files.download('MNIST_Sequential_dense.h5')  # download the saved file from the Colab server

"""<b>Чтобы использовать на своем компьютере</b><br>
from keras.models import load_model<br>
model=load_model('MNIST_Sequential_dense.h5')<br>
"""

n_new = 1112
y_test_org[n_new]

img = Image.fromarray(x_test_org[n_new], mode='P')  # inspect a single test example (in class we found that the model gets example 1112 wrong, even though accuracy was 98 percent)
plt.imshow(img)
plt.show()

x = x_test[n_new]
x = np.expand_dims(x, axis=0)
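
# Hedged sketch of the actual check (the snippet is cut off above): run the model on
# this single example and compare the predicted class with the true label.
pred = model.predict(x)
print(np.argmax(pred), y_test_org[n_new])  # predicted class vs. true label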
display_image(EPOCHS)

anim_file = 'dcgan.gif'

with imageio.get_writer(anim_file, mode='I') as writer:
  filenames = glob.glob('image*.png')
  filenames = sorted(filenames)
  last = -1
  for i,filename in enumerate(filenames):
    frame = 2*(i**0.5)
    if round(frame) > round(last):
      last = frame
    else:
      continue
    image = imageio.imread(filename)
    writer.append_data(image)
  image = imageio.imread(filename)
  writer.append_data(image)

import IPython
if IPython.version_info > (6,2,0,''):
  display.Image(filename=anim_file)

try:
  from google.colab import files
except ImportError:
   pass
else:
  files.download(anim_file)
!ls

restored_model = torch.load('customer_buy.pt')

y_cust_20_40000 = restored_model(torch.from_numpy(sc.transform(np.array([[40,20000]]))).float())
y_cust_20_40000

_, predicted_20_40000 = torch.max(y_cust_20_40000.data,-1)
predicted_20_40000

model.state_dict()

torch.save(model.state_dict(),'customer_buy_state_dict')

!ls

new_predictor = Net()
new_predictor.load_state_dict(torch.load('customer_buy_state_dict'))  # load the saved weights into the fresh model

y_cust_20_40000 = new_predictor(torch.from_numpy(sc.transform(np.array([[40,20000]]))).float())
y_cust_20_40000

!zip -r customer_buy_state_dict.zip customer_buy_state_dict

!ls

from google.colab import files

files.download('customer_buy_state_dict.zip')

Example #11
import os
from google.colab import files


def downloadFiles(directory):
    """
    Download every file in the given directory from the Colab environment.
    """
    for filename in os.listdir(directory):
        files.download(directory + filename)
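
# Possible usage (hypothetical path) - note the trailing slash, since the function
# concatenates directory + filename without adding a separator:
# downloadFiles('/content/output/')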
Example #12
        total = 0
        for images, labels in test_loader:
            images = images.reshape(-1, sequence_length, input_size).to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        print(
            'Test Accuracy of the model on the 10000 test images: {} %'.format(
                100 * correct / total))
        print('Loss: ', loss.item())


# Create instance of the recurrent network
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train and test
train(model, device, train_loader, criterion, optimizer, num_epochs)
test(model, device, test_loader, criterion)

# Save and download the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
files.download('loss_of_' + rnn_structure + '.npy')
files.download("model.ckpt")
Example #13
                             (df_actions['period_id'] >= 3) * (15 * 60) + 
                             (df_actions['period_id'] == 4) * (15 * 60)
                             )

add_distance_features(expert_actions)
add_time_played(expert_actions)

expert_actions.shape

expert_actions.to_excel('expert_actions.xlsx')

from google.colab import files
expert_actions.to_excel('expert_actions.xlsx')
#files.download("data.csv")

files.download("expert_actions.xlsx")

expert_actions.columns



# generate state features

df_features=expert_actions[['period_id','bodypart_id','type_id', 'result_id','start_x_norm', 'start_y_norm', 'end_x_norm', 'end_y_norm',
       'start_distance_to_goal', 'start_angle_to_goal', 'end_distance_to_goal',
       'end_angle_to_goal', 'clearance', 'corner_crossed', 'corner_short', 'cross', 'dribble',
       'foul', 'freekick_crossed', 'freekick_short', 'goalkick',
       'interception', 'keeper_save', 'pass', 'shot', 'shot_freekick',
       'shot_penalty', 'tackle', 'take_on', 'throw_in', 'diff_x', 'diff_y',
       'distance_covered', 'time_played', 'time_remaining' ]]
df["50"] = df["50"].astype(float)
df["0"] = df["0"].astype(float)
df["Last 4 match runs mean"] = df["Last 4 match runs mean"].astype(float)
df["Man of the match"] = df["Man of the match"].astype(float)
df["Runs"] = df["Runs"].astype(float)
df["HS"] = df["HS"].astype(float)

df.dtypes

df["Man of the match"].replace(np.nan, 0, inplace=True)

df.mean()

df["Ave"].replace(np.nan, 18.805680, inplace=True)
df["Height (cm)"].replace(np.nan, 181.430074, inplace=True)
df["SR"].replace(np.nan, 65.630124, inplace=True)
df["Last 4 match runs mean"].replace(np.nan, 13.253897, inplace=True)

print(df.isnull().sum())

print(df["Batting Style"].value_counts())

df["Batting Style"].replace(np.nan, 'R', inplace=True)

#df.to_csv("E:/University Works/4th Year/Semester 7/CO421 - Final Year Project/bt-copy.csv",sep=',')

from google.colab import files

df.to_csv('421_proj_missingValues.csv', index=False)
files.download('421_proj_missingValues.csv')
Example #15
    cv2.imwrite('/content/drive/My Drive/dataset/outputM1/health/{}_{}.png'.format(checkH,name),image)
    checkH += 1
  c += 1

print(checkH)
print(checkNH)

plt.plot(mhis.history['loss'])
plt.plot(mhis.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.ylim(top=1.2, bottom=0)
plt.show()

plt.plot(mhis.history['acc'])
plt.plot(mhis.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='lower right')
plt.show()

model_2.save('FirstModel.h5')
from google.colab import files
files.download("FirstModel.h5")

!dir

Example #16
    parser2.add_argument("-f", type=str, help="colab option.")
    args2 = parser2.parse_args()

references = {}
predictions = {}

for i in range(1, len(df_test)+1): 
  #references[i] = [df_trial_answers['reason1'][i].split(), df_trial_answers['reason2'][i].split(), df_trial_answers['reason3'][i].split()]
  sent = df_test['FalseSent'][i]
  #predictions[i] = df['FalseSent'][i].split()
  predictions[i] = run(sent)[0].split()

predictions[1]

List = []
List2= []
for i in range(1 , len(df_test)+1):
  List.append(predictions[i])
  List2.append(' '.join(word for word in List[i-1]))

List2

submission = pd.DataFrame(List2) 
submission.index = np.arange(1, len(submission) + 1)
submission

submission.to_csv('subtaskC_answers.csv', header=False)
from google.colab import files
files.download('subtaskC_answers.csv')

Example #17
def y(t, k, Ca0):
    y = odeint(secondorder, Ca0, t, args=(k, ))
    return y.ravel()


popt, cov = curve_fit(y, t, data_ca, [20, 1])
a_opt, y0_opt = popt
print(popt)  # optimized parameters returned by curve_fit

print("a = %g" % a_opt)
print("y0 = %g" % y0_opt)

import matplotlib.pyplot as plt
plt.plot(t, data_ca, '.', t, y(t, a_opt, y0_opt), '-')
plt.gcf().set_size_inches(11, 7)
plt.savefig('out.png', dpi=96)
plt.show()

y = odeint(secondorder, y0_opt, t, args=(a_opt, ))
# collecting the information and saving it to txt
data = np.vstack((t, y.T))
data = data.T
print(data)

# saving to csv
df = pd.DataFrame(data)
df.to_csv("data.csv")
files.download('data.csv')
print(df)
Example #18
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_sentence(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])

e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)

# Expected output
# (1000, 16)

import io

out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
  word = reverse_word_index[word_num]
  embeddings = weights[word_num]
  out_m.write(word + "\n")
  out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()

try:
  from google.colab import files
except ImportError:
  pass
else:
  files.download('vecs.tsv')
  files.download('meta.tsv')
epochs = 10
steps_per_epoch = 100

step = 0
for n in range(epochs):
    for m in range(steps_per_epoch):
        step += 1
        train_step(image)
        print(".", end='')
    display.clear_output(wait=True)
    display.display(tensor_to_image(image))
    print("Train step: {}".format(step))

end = time.time()
print("Total time: {:.1f}".format(end - start))

file_name = arg3
# file_name = 'stylized-image.png'
tensor_to_image(image).save(file_name)

try:
    from google.colab import files
except ImportError:
    pass
else:
    files.download(file_name)

print('------------------------------------------------------')
print('----------------DONE----------------------------------')
Example #20
 def downloadFile(self,path):
   global files
   files.download(path)
# %cd darknet
!sed -i 's/GPU=0/GPU=1/g' Makefile
!sed -i 's/OPENCV=0/OPENCV=1/g' Makefile
!make

"""## Get the YOLO Weights file"""

!wget https://pjreddie.com/media/files/yolov3.weights
!chmod a+x ./darknet

"""## To get the working Directory"""

!pwd

from google.colab import drive
drive.mount('/content/drive')

!apt install ffmpeg libopencv-dev libgtk-3-dev python-numpy python3-numpy libdc1394-22 libdc1394-22-dev libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libxine2-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libv4l-dev libtbb-dev qtbase5-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils unzip

"""## It's time to upload the video"""

from google.colab import files

uploaded = files.upload()

!./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -dont_show video_1.mp4 -i 0 -out_filename video1_detected.avi -thresh 0.7

from google.colab import files
files.download('video1_detected.avi')

else:
  n = 1
  max_gen_length = 2000 if model_cfg['word_level'] else 10000
  
timestring = datetime.now().strftime('%Y%m%d_%H%M%S')
gen_file = '{}_gentext_{}.txt'.format(model_name, timestring)

textgen.generate_to_file(gen_file,
                         temperature=temperature,
                         prefix=prefix,
                         n=n,
                         max_gen_length=max_gen_length)

"""Download and save your text file to your computer"""

files.download(gen_file)

"""Now you have some text to play with creatively. Use a text to speech program to create a sound file https://www.naturalreaders.com/online/ 

Design an animation etc etc...

#SECTION 3

In case you want to save any of the models you've trained. You can download the weights below.

You can download the weights and configuration files in the cell below, allowing you recreate the model on your own computer!
"""

files.download('{}_weights.hdf5'.format(model_name))
files.download('{}_vocab.json'.format(model_name))
files.download('{}_config.json'.format(model_name))
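
# Hedged sketch of recreating the model on your own machine with textgenrnn, assuming
# the three downloaded files are in your working directory:
# from textgenrnn import textgenrnn
# textgen = textgenrnn(weights_path='{}_weights.hdf5'.format(model_name),
#                      vocab_path='{}_vocab.json'.format(model_name),
#                      config_path='{}_config.json'.format(model_name))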
Example #23
    epochs=40,
    callbacks=[tensorboard_callback]
    )

# Commented out IPython magic to ensure Python compatibility.
# %load_ext tensorboard
# %tensorboard --logdir log

model2.summary()

tf.saved_model.save(model2, 'demeter_model')

!zip -r demeter_model.zip demeter_model

from google.colab import files
files.download('demeter_model.zip')

#_, train_acc = model.evaluate([encoded_train["input_ids"],encoded_train["attention_masks"]], (slot_train, intent_train))
#_, test_acc = model.evaluate([encoded_valid["input_ids"],encoded_valid["attention_masks"]], (slot_valid, intent_valid))


train_e = model2.evaluate([encoded_train["input_ids"],encoded_train["attention_masks"]], (slot_train, intent_train))
test_e = model2.evaluate([encoded_valid["input_ids"],encoded_valid["attention_masks"]], (slot_valid, intent_valid))

#train_e = model.evaluate([encoded_train["input_ids"],encoded_train["attention_masks"]], (slot_train, intent_train))
#test_e = model.evaluate([encoded_valid["input_ids"],encoded_valid["attention_masks"]], (slot_valid, intent_valid))

#print("train acc", train_acc)
#print("test acc", test_acc)

y_pred = model2.predict([encoded_valid["input_ids"], encoded_valid["attention_masks"]])#.argmax(axis=-1)
Example #24
y = sequences[:,-1]

print("y to categorical")
y = to_categorical(y, num_classes=vocabulary_size+1)
seq_len = X.shape[1]

print("creating model")
model = create_model(vocabulary_size+1, seq_len)

# from pickle import dump, load
model.fit(X,y, batch_size=256, epochs=300, verbose=1)

print("random seed")
random_pick = random.randint(0, len(processed) - 1)  # randint is inclusive at both ends
random_seed_text = processed[random_pick]
seed_text = ' '.join(random_seed_text)


print(f'Seed Text: \n{seed_text}\n')

new_text = generate_text(model, tokenizer, seq_len, seed_text=seed_text, num_gen_Words=300)

print(f'\nNew Text: \n{new_text}')

model.save('7Starwars-256-300_NOpunc-4.h5')
# dump(tokenizer, open('my_simple_tokenizer', 'wb'))

from google.colab import files
files.download('7Starwars-256-300_NOpunc-4.h5')

print(random_pick )
Example #25
mean_value = df[0]['pcv'].mean()
df[0]['pcv'] = df[0]['pcv'].fillna(math.ceil(mean_value))
mean_value = df[0]['wc'].mean()
df[0]['wc'] = df[0]['wc'].fillna(math.ceil(mean_value))
mean_value = df[0]['rc'].mean()
df[0]['rc'] = df[0]['rc'].fillna(mean_value)
# median
median = df[0]['bgr'].median()
df[0]['bgr'] = df[0]['bgr'].fillna(median)
median = df[0]['bu'].median()
df[0]['bu'] = df[0]['bu'].fillna(median)

# mode
df[0]['rbc'] = df[0]['rbc'].fillna(df[0]['rbc'].mode()[0])
df[0]['pc'] = df[0]['pc'].fillna(df[0]['pc'].mode()[0])
df[0]['pcc'] = df[0]['pcc'].fillna(df[0]['pcc'].mode()[0])
df[0]['ba'] = df[0]['ba'].fillna(df[0]['ba'].mode()[0])
df[0]['htn'] = df[0]['htn'].fillna(df[0]['htn'].mode()[0])
df[0]['cad'] = df[0]['cad'].fillna(df[0]['cad'].mode()[0])
df[0]['appet'] = df[0]['appet'].fillna(df[0]['appet'].mode()[0])
df[0]['pe'] = df[0]['pe'].fillna(df[0]['pe'].mode()[0])
df[0]['ane'] = df[0]['ane'].fillna(df[0]['ane'].mode()[0])
df[0]['class'] = df[0]['class'].fillna(df[0]['class'].mode()[0])

writer = ExcelWriter('Result.xlsx')
df[0].to_excel(writer,'Hoja1')

writer.save()
files.download("Result.xlsx")
    pred[i] = tf.identity(model.output[i], name=pred_node_names[i])
print('output nodes names are: ', pred_node_names)

sess = K.get_session()
output_fld = 'tensorflow_model/'
if not os.path.isdir(output_fld):
    os.mkdir(output_fld)
output_graph_name ='/content/Models/model_.pb'
output_graph_suffix = '_inference'

constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False)
print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))

from google.colab import files
files.download('/content/pbtxt/protobuf.pbtxt')

import tensorflow as tf
from tensorflow.core.framework import graph_pb2 as gpb
from google.protobuf import text_format as pbtf

gdef = gpb.GraphDef()

with open('/content/pbtxt/protobuf.pbtxt', 'r') as fh:
    graph_str = fh.read()

pbtf.Parse(graph_str, gdef)

tf.import_graph_def(gdef)

model = tf.keras.models.load_model(os.path.join('/content/Models/inception_frozen.h5'))
  model.add(Flatten())
  model.add(Dense(100, activation = 'elu'))
  model.add(Dropout(0.5))

  model.add(Dense(50, activation = 'elu'))
  model.add(Dense(10, activation = 'elu'))
  model.add(Dense(1))

  optimizer = Adam(lr=1e-3)
  model.compile(loss='mse', optimizer=optimizer)
  return model

model = nvidia_model()
print(model.summary())
print(X_train.shape)

history=model.fit(X_train, y_train, validation_data=(X_valid, y_valid),epochs=10,batch_size=100,verbose=1,shuffle=1)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss','val_loss'])
plt.title('loss')
plt.xlabel('epoch')

model.save('model.h5')

from google.colab import files
files.download('model.h5')

Example #28
from keras.models import Sequential
from keras.layers import Dense, Input, Conv2D, Flatten, Activation

from sklearn.model_selection import train_test_split
X_train,X_test, y_train, y_test= train_test_split(x,y, test_size=0.2, random_state=0)


classifier= Sequential()

classifier.add(Conv2D(10,(5,5), input_shape = (28,28,1), activation ='relu'))
classifier.add(Conv2D(15,(5,5), activation = 'relu'))
classifier.add(Conv2D(20,(5,5), activation = 'relu'))

classifier.add(Flatten())
classifier.add(Dense(64, activation ='relu'))

classifier.add(Dense(1, activation ='sigmoid'))

classifier.compile(optimizer ='Adam', loss = 'binary_crossentropy', metrics =['accuracy'])


classifier.fit(X_train, y_train, validation_data= (X_test, y_test), epochs=20)

import os
classifier.save_weights('Weight_95.h5')
os.listdir()

from google.colab import files
files.download('Weight_95.h5')
Example #29
def create_zip():
  !zip -r /content/new.zip /content/new
  from google.colab import files
  files.download("/content/new.zip")
Example #30
# coding: UTF-8
"""
 2018.5.2
 Colaboratory file input/output
"""

# Upload
from google.colab import files
uploaded = files.upload()

# File input
with open("input.csv", 'r') as f:
    print(f.read())

# File output
with open("output.txt", "w") as f:
    f.write("Nyanhello\nworld\n")

# Check the output (use a context manager so the file handle is closed)
with open('output.txt', 'r') as f:
    print(f.read())

# Download
from google.colab import files
files.download('output.txt')
Example #31
# Get the weight matrix of embedding layer
# (the weights are the numerical patterns between the text in the training dataset that the model has learned)
embed_weights = model_1.get_layer("embedding_1").get_weights()[0]
print(embed_weights.shape)

embed_weights = model_1.get_weights()[2]

import io

# Code to save trained embeddings to file - we got this from here: https://www.tensorflow.org/tutorials/text/word_embeddings#retrieve_the_trained_word_embeddings_and_save_them_to_disk
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')

for index, word in enumerate(words_in_vocab):
  if index == 0:
    continue  # skip 0, it's padding.
  vec = embed_weights[index]
  out_v.write('\t'.join([str(x) for x in vec]) + "\n")
  out_m.write(word + "\n")
out_v.close()
out_m.close()

# Let's download the saved embeddings locally
try:
  from google.colab import files
  files.download('vectors.tsv')
  files.download('metadata.tsv')
except Exception:
  pass