joblib.dump(history_cnn, h3)
joblib.dump(history_cnn_glove, h4)

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)  

# get the folder id where you want to save your files, then upload each one
folder_id = '1rmTGbb19iJn6VbHoHdTYQbK0m0V_EQvP'
for model_path in (m1, m2, m3, m4):
    file = drive.CreateFile({'parents': [{'id': folder_id}]})
    file.SetContentFile(model_path)
    file.Upload()
model1.summary()
model1.compile(optimizer='rmsprop', loss='msle', metrics=['accuracy'])

model1.fit(x=X_train, y=y_train, batch_size=170, epochs=100, validation_data=(X_val, y_val), verbose=1)

!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
model1.save('model_beta.h5')    
model_file = drive.CreateFile({'title' : 'model_beta.h5'})
model_file.SetContentFile('model_beta.h5')
model_file.Upload()
drive.CreateFile({'id': model_file.get('id')})

model1.save_weights('model_weights_beta.h5')
weights_file = drive.CreateFile({'title' : 'model_weights_beta.h5'})
weights_file.SetContentFile('model_weights_beta.h5')
weights_file.Upload()
drive.CreateFile({'id': weights_file.get('id')})

score = model1.evaluate(X_test, y_test, verbose=0)
#model = load_model("")
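# A hedged sketch (not in the original) of reloading the model saved above in
# a later session: fetch it from Drive by the id recorded at upload time, then
# load it with Keras.
from keras.models import load_model
restored = drive.CreateFile({'id': model_file.get('id')})
restored.GetContentFile('model_beta.h5')
model = load_model('model_beta.h5')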

test_array = []
test_index = 259
Example no. 3
from oauth2client.client import GoogleCredentials
from google.colab import drive

drive.mount('/content/drive/')
auth.authenticate_user()

gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

pd.options.mode.chained_assignment = None
# %matplotlib inline
warnings.filterwarnings('ignore')
"""**1.  Load the Data**"""

train_data = drive.CreateFile({'id': '1TNpBMpZVCbvF6hgP-Gvu5Iybu1hiKJkS'})
train_data.GetContentFile('train.csv')
test_data = drive.CreateFile({'id': '1d2_B-6SFGKtBwAeV3THr0459ClHfsK5C'})
test_data.GetContentFile('test.csv')

train_data = pd.read_csv("train.csv", index_col="PassengerId")
test_data = pd.read_csv("test.csv", index_col="PassengerId")
"""1.   Let's see some info about train_data
2.   Let's see some info about test_data
"""

print(train_data.info())
print(train_data.isna().sum())  # number of missing values per column

print(test_data.info())
print(test_data.isna().sum())
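# A hedged sketch of one common next step, assuming the usual Titanic columns
# 'Age' and 'Embarked' exist (adjust to whatever the prints above report):
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].median())
train_data['Embarked'] = train_data['Embarked'].fillna(train_data['Embarked'].mode()[0])
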
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

import io


import zipfile
#https://drive.google.com/open?id=1aVVieDaek7T7ouia1VqVgrbosTP34KGr
file_id="1aVVieDaek7T7ouia1VqVgrbosTP34KGr"
downloaded=drive.CreateFile({'id':file_id})
downloaded.GetContentFile('Dataset.zip')
!unzip Dataset.zip

# Commented out IPython magic to ensure Python compatibility.
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

from fastai.vision import *
from fastai.metrics import error_rate

bs = 64   # batch size: if your GPU runs out of memory, use a smaller value, e.g. 16
sz = 224  # image size
PATH = '/content/gdrive/My Drive/Dataset'
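
# A minimal sketch (an assumption: fastai v1 with a folder-per-class layout
# under PATH) of how bs, sz and PATH typically feed a learner:
data = ImageDataBunch.from_folder(PATH, valid_pct=0.2, size=sz, bs=bs).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)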
Example no. 5
    stage1_gen.save_weights("stage1_gen.h5")
    stage1_dis.save_weights("stage1_dis.h5")

# Install the PyDrive wrapper & import libraries.
# This only needs to be done once in a notebook.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Create & upload a file.
uploaded = drive.CreateFile({'title': 'stage1_dis.h5'})
uploaded.SetContentFile('stage1_dis.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

# Create & upload a file.
uploaded = drive.CreateFile({'title': 'stage1_gen.h5'})
uploaded.SetContentFile('stage1_gen.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

Example no. 6
#Mounting Google drive for Colab execution

from google.colab import drive
drive.mount('/content/drive')

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

your_module = drive.CreateFile({'id':'1dBH4qAfLeP5AUbIlLZunJqcBzQ2BOUFF'})
your_module.GetContentFile('util.py')

from util import plot_confusion_matrix

torch.manual_seed(0)
# Define train and test directories
base_dir = '/content/drive/My Drive/data/places/'

# Pre-processing the dataset
# Normalize the images
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Resize the images
resize = transforms.Resize((224, 224))

transforms = {
Example no. 7
    rootlen = len(target_dir) + 1
    for base, dirs, files in os.walk(target_dir):
        for file in files:
            fn = os.path.join(base, file)
            zipobj.write(fn, fn[rootlen:])

zipfolder(zipname, '/content/utils/')
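
# The definition of zipfolder is cut off above; a self-contained version
# consistent with the visible body (an assumption, not the original code):
import os, zipfile

def zipfolder(zipname, target_dir):
    zipobj = zipfile.ZipFile(zipname + '.zip', 'w', zipfile.ZIP_DEFLATED)
    rootlen = len(target_dir) + 1
    for base, dirs, files in os.walk(target_dir):
        for file in files:
            fn = os.path.join(base, file)
            zipobj.write(fn, fn[rootlen:])  # store paths relative to target_dir
    zipobj.close()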

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# 2. Create & upload a file text file.
file1 = drive.CreateFile()
file1.SetContentFile(zipname+".zip")
file1.Upload()

"""###Summary and Perfomance of other architectures

As the required accuracy was to be more than 95% I tried out few more exisiting architectures like Resnet18, Resnet50, Vggn16 unfortunaley I could not get any desired accuracy infact lower,maybe because the dataset we were dealing with this too small for big architecures.
 
For these architecture's implementations I used fast.ai library 
I augmented the data to avoid overfitting as much as possible using fast.ai Imagelist

Results:

Vggn16 with batchnorm : Maximum accuracy of 39%
accuracy kept on oscillating back and forth but did not move above 39%
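
# A hedged sketch of the fast.ai v1 ImageList augmentation pipeline mentioned
# above (the path, split ratio and transform values are assumptions):
from fastai.vision import ImageList, get_transforms, imagenet_stats
data = (ImageList.from_folder(PATH)
        .split_by_rand_pct(0.2)
        .label_from_folder()
        .transform(get_transforms(flip_vert=False, max_rotate=10), size=224)
        .databunch(bs=64)
        .normalize(imagenet_stats))
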
!mkdir -p first second 
# !mv mnist_test.csv ./top30
# !cd /content/top30
# !ls 
cnt1, cnt2, cnt3 = 1, 1, 1

for row in  data_csv.index:
    # grade = data_csv.loc[row, "生理性別"]  # "biological sex" column
    grade = data_csv.loc[row, "平均成績%數"]  # "average grade percentile" column
    pic_ID = data_csv.loc[row, "左手的掌心照"]  # "left-palm photo" column (a Drive link)
    
    pic_ID = pic_ID[pic_ID.find("id")+3:]
    if pic_ID in ["1L-DTFqfoj0MKAyG1MCIBYuC0tCTUNby1", "1saw4I-6_Oo-37tOcSqq5ceLA-WIz_vj4", "1invk7pqN5BeaXBtXa3gQRTZ7vICmm08F", "1UrImTDWGqBPHtP4fzREf77i7Kow7w23t", "1aPl6uKvUYwGL-yt6I1x8ILDVONdFJTeU", "1Cf9lkd89gRhDbJxl9POhPFPWy8uGVcgG", "1FwU9BEojTnnCZ_k2NwmUzukuyAl_LkB6", "1EmWAdyIoK7QZ7I9wkK1tSXiY6K4V1zHS" ]:
        continue
    # print(pic_ID)
    downloaded = drive.CreateFile({'id': pic_ID})
    print(pic_ID)

    if grade=="0-10%" or grade=="11-20%" or grade=="21-30%" or grade=="31-40%":
        pic_name = "first_" + str(cnt1) + ".png"
        print(pic_name)
        downloaded.GetContentFile(pic_name)
        succ = convertjpg(pic_name)
        if succ==0:
            os.remove(os.getcwd()+"/"+pic_name)
            continue
        cnt1 += 1

        img=mpimg.imread(pic_name)
        imgplot = plt.imshow(img)
        plt.show()
drive.mount("/content/gdrive")

!pip install -U -q PyDrive

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

json_import = drive.CreateFile({'id':'1qI9zJ_jd_8MZkzY38e82gQ40ZMBLL-Mw'})

json_import.GetContentFile('PlantVillage.zip')

from zipfile import ZipFile

file_name = "PlantVillage.zip"
with ZipFile(file_name, 'r') as zip_ref:
    zip_ref.extractall()
    print('Done')

Example no. 10
# This only needs to be done once in a notebook.
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Create & upload a text file.
file_path = '/content/pot_dists_357k.npz'
uploaded = drive.CreateFile({'title': 'pot_dists_357k.npz'})
uploaded.SetContentFile(file_path)
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

#-------------------------------------------------------------------------------
# split and standard/normalise data
#-------------------------------------------------------------------------------

# entire sample dataset
X = X.reshape(-1, N_pot, N_pot, 1)

# split dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# standardize X data
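# The standardisation code is cut off here; a minimal sketch (an assumption,
# using training-set statistics only, to avoid leakage into the test set):
mu, sigma = X_train.mean(), X_train.std()
X_train = (X_train - mu) / sigma
X_test = (X_test - mu) / sigma
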
# Saving the model weights in your google drive

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# get the folder id where you want to save your file
file = drive.CreateFile(
    {'parents': [{
        u'id': '1WPJKqUwcgnQz6aMP2yZM9l4d-71cUyBa'
    }]})
file.SetContentFile('/content/customer_churn_prediction_model.h5')
file.Upload()

# Saving your model architecture in your google drive

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
Example no. 12
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# 2. Save Keras Model or weights on google drive

# create on Colab directory
model.save('model.h5')    
model_file = drive.CreateFile({'title' : 'model.h5'})
model_file.SetContentFile('model.h5')
model_file.Upload()

# keep a reference to the uploaded file (it can be fetched again later by this id)
drive.CreateFile({'id': model_file.get('id')})

#Save the model weights
model.save_weights('model_weights.h5')
weights_file = drive.CreateFile({'title' : 'model_weights.h5'})
weights_file.SetContentFile('model_weights.h5')
weights_file.Upload()
drive.CreateFile({'id': weights_file.get('id')})

# 3. reload weights from google drive into the model
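# The reload code is cut off here; a hedged sketch consistent with the upload
# above (the id comes from weights_file.get('id')):
restored_weights = drive.CreateFile({'id': weights_file.get('id')})
restored_weights.GetContentFile('model_weights.h5')
model.load_weights('model_weights.h5')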
Example no. 13
    print("Currently Augmenting:", class_names)
    data_dir = os.path.join(train_folder, class_names)
    data_augment(data_dir)

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()

gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

your_module = drive.CreateFile({"id": "1SLIjmWvYhFEQ6ImUlOzv5rZa4eV35eE5"})   # "your_module_file_id" is the part after "id=" in the shareable link
your_module.GetContentFile("six_classes_utils.py") # Save the .py module file to Colab VM

import six_classes_utils

from multiprocessing import Pool

#Compare class distribution
line_chart = pygal.Bar(height=300)
line_chart.title = 'Animals Class Distribution'
for o in os.listdir(train_folder):
    line_chart.add(o, len(os.listdir(os.path.join(train_folder, o))))
galplot(line_chart)

#Oversampling Minority Classes in Training Set
def data_augment(data_dir):
Example no. 14
    prediction = NB_Classifier.predict(x_test) 
    print('Test accuracy is {}'.format(accuracy_score(y_test, prediction)))
      
    return NB_Classifier

dataset = pd.read_csv('/content/gdrive/My Drive/DataScience/ProjectWork/train.csv')

NB_Model  = NaiveBayesModel(dataset)

def predict(comment,model):
  categories = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']
  probs = model.predict_proba([comment])[0]
  for (prob, category) in zip(probs, categories): 
    print('{} : {}%'.format(category, round(prob * 100, 2)))

predict("he is a good boy. he loves to talk shit.",NB_Model)

#save the model to a file
with open('NB_Model_lat.pkl', 'wb') as f:
    pickle.dump(NB_Model, f)
file = drive.CreateFile({'title' : 'NB_Model_lat.pkl'})
file.SetContentFile('NB_Model_lat.pkl')
file.Upload()
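
# A hedged sketch (not in the original) of restoring the pickled model later:
# download it by the id recorded at upload time, then unpickle it.
restored = drive.CreateFile({'id': file.get('id')})
restored.GetContentFile('NB_Model_lat.pkl')
with open('NB_Model_lat.pkl', 'rb') as f:
    NB_Model = pickle.load(f)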







Example no. 15
#Authenticate E-Mail ID

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# mount under an alias so the colab module does not shadow the PyDrive client above
from google.colab import drive as colab_drive
colab_drive.mount('/content/drive')

"""# EDA"""

#Get File from Drive using file-ID

#2.1 Get the Zillow file
downloaded = drive.CreateFile({'id':'1VkaBwlaXR90PWZtNiJtr4Eg3RVwpnlwi'}) # replace the id with id of file you want to access
downloaded.GetContentFile('Zillow.csv') 
#2.2 Get the minimum wage file
download = drive.CreateFile({'id':'1Wy1U-aWNw0xP26Kg2YwvIG5UiOWnMHa6'}) # replace the id with id of file you want to access
download.GetContentFile('Minimum Wage Data.csv')

# Read House Price dataset

df1 = pd.read_csv('/content/drive/MyDrive/Final.csv')
df1.head()

# Importing the libraries
import numpy as np
# import matplotlib.pyplot as plt
import pandas as pd
import pickle
Example no. 16
!pip install -U -q PyDrive

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# the id of the file, looked up by the method above
file_id = "1g5ZfFNVRWfSrlL3HIW51jn9ZIpjwyK7m"
drive_file = drive.CreateFile({'id': file_id})

# fetch the file
drive_file.GetContentFile("lena.jpg")

"""そして、、、

ファイルから画像取り出す、それを保存
"""

## read the "lena.jpg" image file fetched above
import cv2
lena = cv2.imread('/content/lena.jpg')

## if reading directly from Google Drive instead, use /content/drive/My Drive/<your-file>.jpg
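
# A small sketch (an assumption, not in the original) for displaying the image:
# cv2.imread returns BGR, so convert to RGB before handing it to matplotlib.
import matplotlib.pyplot as plt
plt.imshow(cv2.cvtColor(lena, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
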
end = time.time()
print("Model took %0.2f seconds to train"%(end - start))

!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive 
from google.colab import auth 
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

model.save('model_CNN.h5')
model_file = drive.CreateFile({'title': 'model_CNN.h5'})  # 'title' sets the Drive file name
model_file.SetContentFile('model_CNN.h5')                      
model_file.Upload()

# always save your weights after training or during training
# (note: this Windows path is from a local run; it will not exist on Colab)
model.save_weights('C:/Users/RohithRamesh/Desktop/CC Configuration/CNN_100_epochs')

from tensorflow.keras.models import load_model
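# load_model is imported above but never used in this excerpt; the intended
# reload (an assumption) would be a single call:
model = load_model('model_CNN.h5')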

plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
Example no. 18
import random

from google.colab import drive
drive.mount('/content/gdrive')

!pip install -U -q PyDrive

# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Copy/download the file
fid = drive.ListFile({'q': "title='Final_project.ipynb'"}).GetList()[0]['id']
f = drive.CreateFile({'id': fid})
f.GetContentFile('Final_project.ipynb')

# Read price data and keep only required columns


def daprice_muiz():
    price_2 = pd.read_csv(
        '/content/gdrive/My Drive/Topics in Data Science/Final Project/Data Source/Price data/2019_2018_OASIS_Day-Ahead_Market_Zonal_LBMP.csv')
    price_1 = pd.read_csv(
        '/content/gdrive/My Drive/Topics in Data Science/Final Project/Data Source/Price data/2016_2017_OASIS_Day-Ahead_Market_Zonal_LBMP.csv')

    # price_2 = pd.read_csv('/content/gdrive/My Drive/ECE 592 Topics in Data Science/Final Project/Data Source/Price data/2019_2018_OASIS_Day-Ahead_Market_Zonal_LBMP.csv')
    # price_1 = pd.read_csv('/content/gdrive/My Drive/ECE 592 Topics in Data Science/Final Project/Data Source/Price data/2016_2017_OASIS_Day-Ahead_Market_Zonal_LBMP.csv')

    price_1.rename(columns={'Eastern Date Hour': 'Datetime', 'DAM Zonal LBMP': 'LBMP',
from google.colab import drive
drive.mount('/content/drive')

import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

download = drive.CreateFile({'id': '1-umtXiV8Bd0n5eVr2L3pGVmc61MJe2tF'})
download.GetContentFile('testing_tar.tgz')

!tar -xvf  'testing_tar.tgz' -C 'sample_data'

from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, Input
from keras import backend as K
import cv2, numpy as np
import glob
from keras.activations import relu 
import keras as keras
from keras.models import Model
import tensorflow as tf
Example no. 20
##################################################################################################################################################################

# Code to read csv file into Colaboratory:
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# NB - Google drive shareable link for each python file required. This is different for every drive

GRU = drive.CreateFile({'id':'1XheD3ckzdeUrukYzj0jeINsKhaFIrKTG'}) # https://colab.research.google.com/drive/1XheD3ckzdeUrukYzj0jeINsKhaFIrKTG?usp=sharing
BiGRU = drive.CreateFile({'id':'14KOduMX_vPFOpTrytTqorr_g_lelHQM3'}) # https://colab.research.google.com/drive/14KOduMX_vPFOpTrytTqorr_g_lelHQM3?usp=sharing
BiGRUAtt = drive.CreateFile({'id':'1rOeK2LIb0KadAYRz1MsDbMAliQORI7F9'}) # https://colab.research.google.com/drive/1rOeK2LIb0KadAYRz1MsDbMAliQORI7F9?usp=sharing
BiLSTM = drive.CreateFile({'id':'1b7OkJFVdpdArm6tkHJ5baQb8QHzM3kgz'}) # https://colab.research.google.com/drive/1b7OkJFVdpdArm6tkHJ5baQb8QHzM3kgz?usp=sharing
BiLSTMAtt = drive.CreateFile({'id':'10lkacFL-pjUrZN4xrAOekVcvS8ijiHh3'}) # https://colab.research.google.com/drive/10lkacFL-pjUrZN4xrAOekVcvS8ijiHh3?usp=sharing
GRUAtt = drive.CreateFile({'id':'172uBGbtBXPbGQeAYZ2u7kA7bgHhXLDDj'}) # https://colab.research.google.com/drive/172uBGbtBXPbGQeAYZ2u7kA7bgHhXLDDj?usp=sharing
LSTM = drive.CreateFile({'id':'1znHsM5fzJ9GiRjtrwlpMyCFrvZ_me6a7'}) # https://colab.research.google.com/drive/1znHsM5fzJ9GiRjtrwlpMyCFrvZ_me6a7?usp=sharing
LSTMAtt = drive.CreateFile({'id':'12jFEIOT5LrMe-8lX7WEyWRIs9dxksPg_'}) #https://colab.research.google.com/drive/12jFEIOT5LrMe-8lX7WEyWRIs9dxksPg_?usp=sharing
input_data = drive.CreateFile({'id':'1O3KLuOPf-Yrryb7TbuX8rva3hjhe343w'}) # https://colab.research.google.com/drive/1O3KLuOPf-Yrryb7TbuX8rva3hjhe343w?usp=sharing

GRU.GetContentFile('GRU.ipynb')
BiGRU.GetContentFile('BiGRU.ipynb')
BiGRUAtt.GetContentFile('BiGRUAtt.ipynb')
BiLSTM.GetContentFile('BiLSTM.ipynb')
BiLSTMAtt.GetContentFile('BiLSTMAtt.ipynb')
GRUAtt.GetContentFile('GRUAtt.ipynb')
Example no. 21
from google.colab import drive
drive.mount('/content/drive')

# Code to read csv file into colaboratory:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

downloaded = drive.CreateFile({'id':'1oF7toJFWt-tox50GM8I2AT_fvYITkgzZ'}) # replace the id with id of file you want to access
downloaded.GetContentFile('Data_namechanged.pkl')

# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import math

from statsmodels.tsa.arima_model import ARIMA

#data = pd.read_pickle('../IP21_Excel_Data/DataFrames/Data_namechanged.pkl')
data = pd.read_pickle('Data_namechanged.pkl')
data = data.interpolate(method='linear') 
Example no. 22
import numpy as np
import csv
import pandas
from google.colab import drive
from google.colab import files
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

import os
os.makedirs('./snapshots', exist_ok=True)  # GetContentFile needs the target directory to exist
t = drive.CreateFile({'id':'1QarmXV_FaTfL7CMERlJhSYmyv2ddU2XT'})
t.GetContentFile('./snapshots/resnet_1k.h5')

PRETRAINED_MODEL = './snapshots/_pretrained_model.h5'

#### OPTION 1: DOWNLOAD INITIAL PRETRAINED MODEL FROM FIZYR ####
URL_MODEL = 'https://github.com/fizyr/keras-retinanet/releases/download/0.5.0/resnet50_coco_best_v2.1.0.h5'
import urllib.request
urllib.request.urlretrieve(URL_MODEL, PRETRAINED_MODEL)

#### OPTION 2: DOWNLOAD CUSTOM PRETRAINED MODEL FROM GOOGLE DRIVE. CHANGE DRIVE_MODEL VALUE. USE THIS TO CONTINUE PREVIOUS TRAINING EPOCHS ####
#drive.mount('/content/gdrive')
#DRIVE_MODEL = '/content/gdrive/My Drive/Colab Notebooks/objdet_tensorflow_colab/resnet50_csv_10.h5'
#shutil.copy(DRIVE_MODEL, PRETRAINED_MODEL)


print('Downloaded pretrained model to ' + PRETRAINED_MODEL)
Example no. 23
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
def print_heading(string, color=None):
    print_html(string, tag='h3', color=color)

# 2. Save Keras Model or weights on google drive

# create on Colab directory
model.save('TakeoverQuality_model_NeuralNetwork.h5')    
model_file = drive.CreateFile({'title' : 'TakeoverQuality_model_NeuralNetwork.h5'})
model_file.SetContentFile('TakeoverQuality_model_NeuralNetwork.h5')
model_file.Upload()

# keep a reference to the uploaded file (it can be fetched again later by this id)
drive.CreateFile({'id': model_file.get('id')})

from google.colab import files
files.download("best-ThreeClasses-quality-06-0.00.hdf5")

import glob, math, os, sys, zipfile
from IPython.display import display, HTML
# Some global variables and general settings
saved_model_dir = './saved_model'
tensorboard_logs = './logs'
pd.options.display.float_format = '{:.2f}'.format
# This only needs to be done once in a notebook.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Create & upload a text file.
uploaded = drive.CreateFile({'title': 'Sample file.txt'})
uploaded.SetContentString('Sample upload file content')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

from google.colab import auth
auth.authenticate_user()

!pip install h5py pyyaml
!pip install tf_nightly

!pip install torch
import torch

!pip install torchvision
import torchvision
Example no. 25
model.add(Dropout(0.4))
 
model.add(Flatten())
model.add(Dense(128, activation="sigmoid"))  # the relu stacked after a sigmoid was a no-op and has been removed
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(0.0003), metrics=['accuracy'])

model.summary()

model.fit(x = x_train_gr,y = y_train, batch_size=64, validation_data = (x_test_gr,y_test), epochs = 5)

model.save_weights('casual_training2.h5')

!pip install -U -q PyDrive

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

model.save('model2.h5')
model_file = drive.CreateFile({'title': 'model2.h5'})  # 'title' sets the Drive file name
model_file.SetContentFile('model2.h5')
model_file.Upload()

drive.CreateFile({'id': model_file.get('id')})

file_obj = drive.CreateFile({'id': '1ZCJ4_AnKkdBGBeZRkpkTRZUjlqO5HMRP'})
file_obj.GetContentFile('model1.h5')
Example no. 26
collected= gc.collect()
print("garbage collected= ",collected)

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)  

# no folder id is given here, so the file is uploaded to the Drive root
file = drive.CreateFile()
file.SetContentFile('combined_feature_x.pickle')
file.Upload()
# del(x)
import gc
collected= gc.collect()
print("garbage collected= ",collected)

import pickle as pk
f=open('ytrain.pickle','rb')
ytrain= pk.load(f)
f.close()

sentiment_train, sentiment_val, wv_train, wv_val, base_train, base_val, trainLabels, valLabels = train_test_split(sentiment_train, wv_train, base_train, trainLabels, test_size=0.20, random_state=42)

from sklearn.model_selection import train_test_split
Example no. 27
  path = '/content/' + fn
  img = image.load_img(path, target_size=(150, 150))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)

  images = np.vstack([x])
  classes = model.predict(images, batch_size=64)
  print(fn)
  print(classes[0][0])
  if classes[0][0]>0.5:
    print(fn + " is not masked")
  else:
    print(fn + " is masked")

!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive 
from google.colab import auth 
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

model.save('model.h5')
model_file = drive.CreateFile({'title' : 'model.h5'})
model_file.SetContentFile('model.h5')
model_file.Upload()

drive.CreateFile({'id': model_file.get('id')})
Example no. 28
!pip install PyDrive

from google.colab import drive
drive.mount('/content/drive')

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

downloaded = drive.CreateFile({'id':"16_ZNThSbAQyY1Hw6CXBm6hteZ0eeLJpI"})   # replace the id with id of file you want to access
downloaded.GetContentFile('pytorch_model.bin')        # replace the file name with your file
downloaded = drive.CreateFile({'id':"1ill09R5sdg7GzBCfKfGty8eW5F9YZ-m1"})   # replace the id with id of file you want to access
downloaded.GetContentFile('config.json')

"""#Code started"""

!pip install transformers==2.4.1 -q

import torch
from transformers import *

model_name = 'roberta-large' #uncased should have do_lower_case=True
model = AutoModelForSequenceClassification.from_pretrained('./')
tokenizer = RobertaTokenizer.from_pretrained(model_name, do_lower_case=False)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()


# Install the PyDrive wrapper & import libraries.
# This only needs to be done once in a notebook.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Create & upload a file.
uploaded = drive.CreateFile({'title': 'MINI_PROJECT_MODEL_FINAL.h5'})
uploaded.SetContentFile('MINI_PROJECT_MODEL_FINAL.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))


batch_size = 256
epochs = 18

hist = model2.fit(X_train_pad, y_train, 
                 batch_size=batch_size,
                 epochs=epochs,
                 validation_data=(X_test_pad,y_test))

accr = model2.evaluate(X_test_pad,y_test)
print('Test set\n  Loss: {:0.3f}\n  Accuracy: {:0.3f}'.format(accr[0],accr[1]))

model2.save('CNN_with_word2vec.h5')
model2_file = drive.CreateFile({'title' : 'CNN_with_word2vec.h5'}) 
model2_file.SetContentFile('CNN_with_word2vec.h5') 
model2_file.Upload()

# Accuracy plot
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# Loss plot
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])