Example #1
def kriging_plot():
    #date =  str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    data = dataset.data()
    data = data.sort_values(by=['component'])
    #data.set_index(keys=['component'], drop=False,inplace=True)
    names = data['component'].unique().tolist()

    # connect to db once, outside the per-component loop, so the connection is
    # always defined when we disconnect at the end
    client = Cloudant(user, password, url=url, connect=True)
    db = client.create_database(db_name, throw_on_exists=False)

    for component in names:
        compdata = data.loc[data.component == component]
        compdata = compdata.reset_index(drop=True)
        # skip components with too few observations to krige
        if len(compdata.index) < 4:
            continue

        krige_data = krige_task(compdata[[
            'latitude', 'longitude', 'value', 'unit', 'component', 'x', 'y'
        ]])

        #define documentDataFrame(compdata, columns=['latitude','longitude', 'value', 'unit', 'component'] )
        document = {
            'date': str(compdata['toTime'].iloc[0]),
            'component': str(compdata['component'].iloc[0]),
            'data':
            data.loc[data.component == component].to_json(orient='index'),
            'krige_data': simplejson.dumps(krige_data.tolist())
        }
        # 'data' : compdata.to_json(orient='index'),
        #store document
        document = db.create_document(document)
    #disconnect from db
    client.disconnect()
    return data.to_html()
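The function above writes one document per component into Cloudant. As a complement, here is a minimal read-back sketch; it assumes the same `user`, `password`, `url`, and `db_name` values used in `kriging_plot()` and relies only on standard python-cloudant calls (`client[db_name]`, iterating a database).

import simplejson
from cloudant.client import Cloudant

client = Cloudant(user, password, url=url, connect=True)
try:
    db = client[db_name]
    for doc in db:
        # each document was stored by kriging_plot() above
        krige_grid = simplejson.loads(doc['krige_data'])
        print(doc['component'], doc['date'], len(krige_grid))
finally:
    client.disconnect()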
Example #2

Mukherjee emailed me in 2017, into an SQL Server database.
'''

filename_input_csv = "C:/users/podengo/git/citrus/data/am4ir/FloridaQatar_AM4IR_pilot_filtered.txt"

dsr = Dataset(dbms='csv',
              delimiter='\t',
              name=filename_input_csv,
              open_mode="r",
              encoding='utf-8')

row_set = dsr.dict_reader()

#Print field/column/header values
print("Input field names={}".format(row_set.fieldnames))

#for i0,row in enumerate(row_set):

# create pyodbc connection for the table to which we will write
# create dataset writer for input dataset 'a' for am4ir in year 2017

dsw = Dataset(dbms="pyodbc",
              open_mode="w",
              encoding='utf-8',
              server=r".\SQLEXPRESS",
              db="silodb",
              table='am4ir_2017a')

data(dsr=dsr, dsw=dsw, verbosity=1)
#dw.writeheader()
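`data(dsr=dsr, dsw=dsw, verbosity=1)` hides the actual row copy inside the project's `Dataset` wrapper, which is not shown here. For orientation only, a rough equivalent of that copy step using the plain `csv` and `pyodbc` modules could look like the sketch below; the driver name, trusted-connection authentication, and the assumption that `am4ir_2017a` already exists with columns matching the file header are guesses, not part of the original code.

import csv
import pyodbc

conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=.\\SQLEXPRESS;DATABASE=silodb;Trusted_Connection=yes")
cursor = conn.cursor()

with open(filename_input_csv, newline='', encoding='utf-8') as fh:
    reader = csv.DictReader(fh, delimiter='\t')
    columns = reader.fieldnames
    insert_sql = "INSERT INTO am4ir_2017a ({}) VALUES ({})".format(
        ", ".join(columns), ", ".join("?" for _ in columns))
    for row in reader:
        cursor.execute(insert_sql, [row[c] for c in columns])

conn.commit()
conn.close()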
Example #3
import os
import datetime

import tensorflow as tf

from utils import *
from model import MyModel
from dataset import data  # explicit imports; some of these may also be re-exported by utils

os.environ["CUDA_VISIBLE_DEVICES"] = "0,7,6,5,4,3,2,1"

if __name__ == '__main__':

    # parameters
    config = read_config()
    scale = int(config['dataset'][-1:])
    checkpoint_path = 'checkpoint/' + config['dataset'][-10:] + '/model'
    log_dir = "logs/fit/" + config['dataset'] + '-' + datetime.datetime.now(
    ).strftime("%Y%m%d-%H%M%S")

    # dataset
    trainset, valset, testset = data(name=config['dataset'])

    # model
    model = MyModel(blocks=config['blocks'],
                    channel=config['channel'],
                    scale=scale)

    # lr ExponentialDecay
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        config['learning_rate'],
        decay_steps=config['decay_steps'],
        decay_rate=config['decay_rate'])

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
        loss=Loss(),
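The excerpt stops inside `model.compile()`. The continuation below is only a hedged sketch of how the `checkpoint_path` and `log_dir` variables defined above are typically wired into standard `tf.keras` callbacks; the `config['epochs']` key is hypothetical and not taken from the original code.

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True),
        tf.keras.callbacks.TensorBoard(log_dir=log_dir),
    ]

    model.fit(trainset,
              validation_data=valset,
              epochs=config['epochs'],  # hypothetical key, not in the excerpt
              callbacks=callbacks)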
Example #4

from dataset import data
from sklearn import linear_model, datasets
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np
train_x, train_y, test_x, test_y = data()
reg = linear_model.LinearRegression()
reg.fit(train_x, train_y)
pred = reg.predict(test_x)

# sklearn metrics take (y_true, y_pred); r2_score is not symmetric
print(mean_squared_error(test_y, pred), r2_score(test_y, pred))

plt.scatter(test_x, test_y)
plt.plot(test_x, pred)

plt.show()
Example #5
import os

import numpy as np

# `data` and `im` are this project's loader helpers; `im` is not shown in the
# excerpt, and `data` is assumed to come from the dataset module.
from dataset import data

batch_size = 30
epochs = 30


filename1='/home/orion/Downloads/facial-landmarks/X_TRAIN/'
filename2='/home/orion/Downloads/facial-landmarks/Y_TRAIN/'
filename3='/home/orion/Downloads/facial-landmarks/X_TEST/'
filename4='/home/orion/Downloads/facial-landmarks/Y_TEST/'
files1=os.listdir(filename1)
files2=os.listdir(filename2)
files3=os.listdir(filename3)
files4=os.listdir(filename4)


X_train=data(filename1,files1)
Y_train=data(filename2,files2)
X_test=data(filename3,files3)
Y_test=data(filename4,files4)


data1=im(X_train)
data2=im(Y_train)
data3=im(X_test)
data4=im(Y_test)


x_train=np.asarray(data1)
y_train=np.asarray(data2)
x_test=np.asarray(data3)
y_test=np.asarray(data4)
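The excerpt defines `batch_size` and `epochs` but ends before any model is built. Purely to illustrate where those values plug in, here is a minimal Keras fit sketch; `build_model()` is a hypothetical placeholder, not part of the original example.

model = build_model()  # hypothetical helper returning a compiled Keras model
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))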
Example #6

latest_checkpoint = tf.train.latest_checkpoint(weights_dir)
model.load_weights(latest_checkpoint)

image = tf.keras.Input(shape=[None, None, 3], name="image")
predictions = model(image, training=False)
detections = DecodePredictions(confidence_threshold=0.5)(image, predictions)
inference_model = tf.keras.Model(inputs=image, outputs=detections)


def prepare_image(image):
    image, _, ratio = resize_and_pad_image(image, jitter=None)
    image = tf.keras.applications.resnet.preprocess_input(image)
    return tf.expand_dims(image, axis=0), ratio


_, _, dataset_info = data()

val_dataset = tfds.load("coco/2017", split="validation", data_dir="data")
int2str = dataset_info.features["objects"]["label"].int2str

for sample in val_dataset.take(2):
    image = tf.cast(sample["image"], dtype=tf.float32)
    input_image, ratio = prepare_image(image)
    detections = inference_model.predict(input_image)
    num_detections = detections.valid_detections[0]
    class_names = [
        int2str(int(x)) for x in detections.nmsed_classes[0][:num_detections]
    ]
    visualize_detections(
        image,
        detections.nmsed_boxes[0][:num_detections] / ratio,
Example #7
        test_loss += custom_loss(output, target, vol=True).item()  # .item() replaces the deprecated .data[0]

    test_loss /= len(dtloader) # loss function already averages over batch size
    print('Test set: Average loss: {:.4f}'.format(test_loss))

    return test_loss

#################################################################################
########     Main code    #######################################################
#################################################################################
###Load the dataset

opt.nc = 1
import dataset as nuclei

dataset = nuclei.data(opt.dataroot_images, opt.dataroot_masks, image_size=opt.imageSize,
                      nb_images=opt.n_train, nb_crops=opt.n_crops)
dataset.normalize()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, **kwargs)

testset = nuclei.data(opt.dataroot_images, opt.dataroot_masks, list_of_data=dataset.remaining_data,
                      nb_images=opt.n_train, nb_crops=1, image_size=opt.imageSize)
testset.normalize()
testloader = torch.utils.data.DataLoader(testset, batch_size=opt.batchSize, shuffle=True, **kwargs)

################################################################################
### Create/Read network and initialize

if not exists(opt.experiment):
    makedirs(opt.experiment)

ngpu = int(opt.ngpu)
Example #8
from __future__ import division
import os
import cv2
import dlib
from eye import Eye
from calibration import Calibration
import pandas as pd
import xlwt
from dataset import data

datalist = data()


class GazeTracking(object):
    """
    This class tracks the user's gaze.
    It provides useful information like the position of the eyes
    and pupils and allows to know if the eyes are open or closed
    """
    def __init__(self):
        self.frame = None
        self.eye_left = None
        self.eye_right = None
        self.calibration = Calibration()

        # _face_detector is used to detect faces
        self._face_detector = dlib.get_frontal_face_detector()

        # _predictor is used to get facial landmarks of a given face
        cwd = os.path.abspath(os.path.dirname(__file__))
        model_path = os.path.abspath(
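The excerpt is cut off before the class is complete. For context, here is a minimal usage sketch; it assumes the standard `gaze_tracking` API (`refresh`, `annotated_frame`, `pupil_left_coords`, `pupil_right_coords`) from the upstream library this class is based on, which may differ from the modified copy above.

import cv2

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    ok, frame = webcam.read()
    if not ok:
        break
    gaze.refresh(frame)                 # analyse the new frame
    annotated = gaze.annotated_frame()  # frame with pupils highlighted
    print(gaze.pupil_left_coords(), gaze.pupil_right_coords())
    cv2.imshow("gaze", annotated)
    if cv2.waitKey(1) == 27:            # Esc quits
        break

webcam.release()
cv2.destroyAllWindows()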
Example #9
callbacks_list = [
    tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(model_dir, "weights" + "_epoch_{epoch}"),
        monitor="loss",
        save_best_only=True,
        save_weights_only=True,
        verbose=1,
    )
]

# Uncomment the following lines when training on the full dataset
# train_steps_per_epoch = dataset_info.splits["train"].num_examples // batch_size
# val_steps_per_epoch = \
#     dataset_info.splits["validation"].num_examples // batch_size

# train_steps = 4 * 100000
# epochs = train_steps // train_steps_per_epoch
train_dataset, val_dataset, _ = data()
epochs = 1

# Running 100 training and 50 validation steps,
# remove `.take` when training on the full dataset
model = make_model()

model.fit(
    train_dataset.take(100),
    validation_data=val_dataset.take(50),
    epochs=epochs,
    callbacks=callbacks_list,
    verbose=1,
)
Example #10
import time

import dataset
import tensorflow as tf
import zoomout_vgg16

batch_size = 10
max_train_data_per_mat = 30
downsample = 2
img_w = 224
img_h = 224
total_f_num = 12416

zo = zoomout_vgg16.Zoomout_Vgg16("zoomout/vgg16.npy",
                                 weight=img_w,
                                 height=img_h,
                                 downsample=downsample)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
data = dataset.data(main_path="data",
                    sp_num=sp_num,
                    weight=img_w,
                    height=img_h,
                    downsample=downsample)

mat_data = {"train_x": [], "train_y": [], "train_slic": []}

start_time = time.time()
# train data
overflow = False
s = 0
index = 0
while not overflow:
    print("train %d" % s)
    print("time:%d" % (time.time() - start_time))
    s += 1
    train_data, overflow = data.next_batch(batch=batch_size, kind="train")
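The excerpt ends inside the batch loop. Judging from the `mat_data` keys and `max_train_data_per_mat`, the batches are presumably accumulated and periodically flushed to `.mat` files; the sketch below is only a guess at that continuation (the `(x, y, slic)` unpacking of `train_data` and the use of `scipy.io.savemat` are assumptions, not the original code).

    # inside the while loop above; scipy.io is assumed to be imported at the top
    x, y, slic = train_data
    mat_data["train_x"].append(x)
    mat_data["train_y"].append(y)
    mat_data["train_slic"].append(slic)

    if len(mat_data["train_x"]) >= max_train_data_per_mat:
        scipy.io.savemat("data/train_%d.mat" % index, mat_data)
        mat_data = {"train_x": [], "train_y": [], "train_slic": []}
        index += 1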