def update(cust_id, app_id):
    """Record *cust_id* under *app_id* in the module-level ``meta_data`` map.

    Lazily initializes the backing store on first use (while ``meta_data``
    is still empty) by loading data and building the mapping.

    Args:
        cust_id: customer identifier to append.
        app_id: key into ``meta_data``; assumed to exist after
            ``set_meta_data()`` has run — TODO confirm with its definition.

    Raises:
        KeyError: if ``app_id`` is not present in ``meta_data``.
    """
    if not meta_data:  # idiomatic emptiness check instead of len(...) == 0
        data.initialize_data()
        set_meta_data()

    meta_data[app_id].append(cust_id)
    # NOTE(review): debug output — consider logging or removing in production.
    print(meta_data)
Exemple #2
0
# Seed the torch RNG so runs are reproducible.
torch.manual_seed(args.seed)

# Use more DataLoader worker processes when a GPU is available.
if torch.cuda.is_available():
    using_gpu = True
    workers = 4
    print("GPU enabled")
else:
    using_gpu = False
    workers = 1
    print("GPU not enabled")
### Data Initialization and Loading
from data import initialize_data, data_transforms, data_crop, data_rot, data_rotshear,\
                data_transl, data_cropshear, data_jitter1, data_grey, data_jitter3, data_jitter4 # data.py in the same folder
initialize_data(args.data)  # extracts the zip files, makes a validation set

train_loader = torch.utils.data.DataLoader(
    torch.utils.data.ConcatDataset([
        datasets.ImageFolder(args.data + '/train_images', transform=data_crop),
        datasets.ImageFolder(args.data + '/train_images', transform=data_rot),
        datasets.ImageFolder(args.data + '/train_images',
                             transform=data_jitter1),
        #    datasets.ImageFolder(args.data + '/train_images',
        #                         transform=data_flip),
        #    datasets.ImageFolder(args.data + '/train_images',
        #                         transform=data_grey),
        datasets.ImageFolder(args.data + '/train_images',
                             transform=data_rotshear),
        datasets.ImageFolder(args.data + '/train_images',
                             transform=data_transl),
Exemple #3
0
import os

from flask import Flask, render_template, request, Response, json
import pandas as pd

import data
import logic
import validator


app = Flask(__name__)
# Fresh random secret key on every start — sessions won't survive a restart;
# acceptable for a single-process app.
app.secret_key = os.urandom(24)

# CDI data loaded once at import time and shared (read-only, presumably) by
# all request handlers — confirm handlers never mutate it.
cdi_data = data.initialize_data()


@app.route('/')
def chart():
    """Serve the landing page that hosts the chart."""
    page = render_template('chart.html')
    return page


@app.route('/api', methods=["GET", "POST"])
def api():

    try:
        input_data = request.json if request.method == 'POST' else request.args

        start, end, cdb = validator.validate_request(input_data)
        result = logic.calculate_cdb_for_period(cdi_data, cdb, start, end)

        response = result.to_json(orient='records')
Exemple #4
0
    edit_data_button.clicked.connect(lambda: open_edit_dialog())
    actions_vbox.addWidget(edit_data_button)

    # Exit button
    exit_button = QPushButton("Exit")
    exit_button.clicked.connect(lambda: app.exit(0))
    actions_vbox.addWidget(exit_button)

    return actions_vbox


def get_main_layout():
    """Build the window's root layout: selection column | stretch | actions column."""
    root = QHBoxLayout()
    root.addLayout(get_selection_vbox())
    root.addStretch()  # keeps the two columns pushed to opposite edges
    root.addLayout(get_actions_vbox())
    return root


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main_window = QWidget()
    main_window.setWindowTitle("Deployment Helper")
    data.initialize_data()
    edit_add_dialog.get_customer_drop_down()
    main_window.setLayout(get_main_layout())
    main_window.show()
    app.exec()
    # sys.exit(app.exec())
Exemple #5
0
import torch
import matplotlib.pyplot as plt
from torchvision import datasets
import multiprocessing
from cutout import save_image_tensor2pillow

data_path = 'data/data0'
save_dir = './footage/'
from data import initialize_data, data_jitter_brightness
initialize_data(data_path)  # extracts the zip files, makes a validation set

# Single brightness-jitter dataset, wrapped in ConcatDataset to keep the
# original loader shape.
brightness_folder = datasets.ImageFolder(data_path + '/train_images',
                                         transform=data_jitter_brightness)
train_loader = torch.utils.data.DataLoader(
    torch.utils.data.ConcatDataset([brightness_folder]),
    batch_size=1,
    shuffle=True,
    pin_memory=True,
    num_workers=multiprocessing.cpu_count())

# Save one augmented sample to disk and display it; the loop intentionally
# breaks after the first batch. (Dropped the unused enumerate index.)
for data, target in train_loader:
    print(type(data), target)
    # Move the label to CPU and use it (zero-padded to 5 digits) as the filename.
    target = target.to(torch.device('cpu')).type(torch.uint8).numpy()
    save_path = save_dir + format(target[0], '05d') + '.png'
    img = save_image_tensor2pillow(data, save_path, 'pil', True)
    plt.figure("img")
    plt.imshow(img)
    plt.show()
    break
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--model_file', type=str, default=None, metavar='PU',
                    help='pick up where you were (default: None)')
parser.add_argument('--model_name', type=str, default='model', metavar='MN',
                    help='name of the model file (default: model)')
args = parser.parse_args()

# Seed the torch RNG so shuffling/initialization are reproducible.
torch.manual_seed(args.seed)

### Data Initialization and Loading
from data import initialize_data, data_transforms # data.py in the same folder
train_images, train_labels, val_images, val_labels = initialize_data(args.data) # extracts the zip files, makes a validation set



# Wrap the pre-loaded tensors — assumes initialize_data returns torch tensors
# of matching first dimension; TODO confirm in data.py.
train_dataset = torch.utils.data.TensorDataset(train_images, train_labels)
val_dataset = torch.utils.data.TensorDataset(val_images, val_labels)


# Shuffle only the training split; both loaders use 4 worker processes.
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size, shuffle=True, num_workers=4)
val_loader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=args.batch_size, shuffle=False, num_workers=4)
    metavar='M',
    help="the model file to be evaluated. Usually it is of the form model_X.pth"
)
parser.add_argument('--outfile',
                    type=str,
                    default='gtsrb_kaggle.csv',
                    metavar='D',
                    help="name of the output csv file")

args = parser.parse_args()

# Restore trained weights and switch to inference mode (eval() disables
# dropout and freezes batch-norm statistics).
state_dict = torch.load(args.model)
model = Net()
model.load_state_dict(state_dict)
model.eval()
# Extract the dataset archives before importing the transforms — presumably
# data_transforms depends on the extracted files; confirm in data.py.
initialize_data(args.data)
from data import data_transforms

test_dir = args.data + '/test_images'

def pil_loader(path):
    """Open the image at *path* and return it converted to RGB.

    Going through an explicit file object avoids a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as fh, Image.open(fh) as img:
        return img.convert('RGB')


output_file = open(args.outfile, "w")
output_file.write("Filename,ClassId\n")
for f in tqdm(os.listdir(test_dir)):
        for i, img in enumerate(aug_imgs[class_id]):
            img_path = os.path.join(class_path, '{:05d}'.format(i) + '.png')
            io.imsave(img_path, img)


# We also need to pre-process the test set
def sharp_img(source_dir, dst_dir):
    """Histogram-normalize every .ppm image in *source_dir* into *dst_dir*.

    Creates *dst_dir* if it does not exist; output files keep their
    original basenames.
    """
    if not os.path.isdir(dst_dir):
        print(dst_dir + ' not found, expanding it')
        os.mkdir(dst_dir)
    for img_path in glob.glob(os.path.join(source_dir, '*.ppm')):
        normalized = hist_norm(io.imread(img_path))
        out_path = os.path.join(dst_dir, os.path.basename(img_path))
        io.imsave(out_path, normalized)


from data import initialize_data

initialize_data('data')  # extracts the zip files, makes a validation set

# In[5]:
# Augment the training images — presumably up to 4000 per class; confirm
# aug_img_set's count semantics (defined elsewhere in this file).
aug_img_set(4000, 'data/train_images', 'data/train_aug_images')

# In[6]:
aug_img_set(100, 'data/val_images', 'data/val_aug_images')

# In[7]:
# Pre-process the test set into a parallel histogram-normalized directory.
sharp_img('data/test_images', 'data/test_sharp_images')