Example #1
def call_fibaro_api(url):
    try:
        response = requests.get(url, auth=(config.fibaro_user, config.fibaro_password))
    except requests.exceptions.ConnectionError:
        hlp.write_log("ERROR: Connection error for " + url)
        raise SystemExit("ERROR: Connection error for " + url)
    return response
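The hlp.write_log helper that these Fibaro snippets call is not shown on this page. A rough, hypothetical sketch of what such a function might look like, based only on the arguments used in the examples below (dtm_prefix, eol); the log file name is an assumption:

from datetime import datetime

LOG_FILE = "fibaro.log"  # assumed location; the real helper may log elsewhere

def write_log(message, dtm_prefix=True, eol=True):
    # Append a message to the log file, optionally prefixed with a timestamp.
    prefix = datetime.now().strftime("%Y-%m-%d %H:%M:%S ") if dtm_prefix else ""
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(prefix + message + ("\n" if eol else ""))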
Example #2
def train():
    print('start training ...........')
    batch_size = 32
    num_epochs = 600
    lr = 0.001

    device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
    model = SSD_VGG(num_classes=21, device=device, freeze=False)
    # model.load_state_dict(torch.load('output/weight.pth', map_location=device))
    train_loader, val_loader = get_loader(batch_size=batch_size)

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=0.0005)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=50)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy, device=device)

    train_losses, val_losses = [], []
    for epoch in range(num_epochs):
        train_epoch_loss = fit(epoch, model, optimizer, criterion, device, train_loader, phase='training')
        val_epoch_loss = fit(epoch, model, optimizer, criterion, device, val_loader, phase='validation')
        print('-----------------------------------------')

        if epoch == 0 or val_epoch_loss <= np.min(val_losses):
            torch.save(model.state_dict(), 'output/weight.pth')

        # if epoch == 0 or train_epoch_loss <= np.min(train_losses):
        #     torch.save(model.state_dict(), 'output/weight.pth')

        train_losses.append(train_epoch_loss)
        val_losses.append(val_epoch_loss)

        write_figure('output', train_losses, val_losses)
        write_log('output', epoch, train_epoch_loss, val_epoch_loss)

        scheduler.step(val_epoch_loss)
Example #3
def train(root, device, model, epochs, bs, lr):
    print('start training ...........')
    train_loader, val_loader = get_loader(root=root,
                                          batch_size=bs,
                                          shuffle=True)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = HDRLoss(device)

    train_losses, val_losses = [], []
    for epoch in range(epochs):
        train_epoch_loss = fit(epoch,
                               model,
                               optimizer,
                               criterion,
                               device,
                               train_loader,
                               phase='training')
        val_epoch_loss = fit(epoch,
                             model,
                             optimizer,
                             criterion,
                             device,
                             val_loader,
                             phase='validation')
        print('-----------------------------------------')

        if epoch == 0 or val_epoch_loss <= np.min(val_losses):
            torch.save(model.state_dict(), 'output/weight.pth')

        train_losses.append(train_epoch_loss)
        val_losses.append(val_epoch_loss)

        write_figures('output', train_losses, val_losses)
        write_log('output', epoch, train_epoch_loss, val_epoch_loss)
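Examples #2, #3 and #10 all delegate the per-epoch work to a fit() helper that is not part of this listing. A minimal sketch of a loss-only version, assuming the loaders yield (inputs, targets) pairs that the criterion accepts directly (the accuracy-returning variant used in Example #9 would additionally count correct predictions):

import torch

def fit(epoch, model, optimizer, criterion, device, loader, phase='training'):
    # One pass over `loader`; gradients and optimizer steps only in the training phase.
    model.train() if phase == 'training' else model.eval()
    running_loss = 0.0
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        with torch.set_grad_enabled(phase == 'training'):
            loss = criterion(model(inputs), targets)
        if phase == 'training':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        running_loss += loss.item()
    epoch_loss = running_loss / len(loader)
    print('{} epoch {}: loss {:.4f}'.format(phase, epoch, epoch_loss))
    return epoch_loss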
Example #4
    def __init__(self):
        self.version = "0.1"
        # Check whether settings.ini exists in the working directory
        if not os.path.isfile(os.path.join(getcwd(), "settings.ini")):
            helper.write_log("settings.ini file is missing", "error")
            # Start the procedure that creates the main settings file
            self.create_global_setting()
Example #5
def detect_paranada(hist, filename):
    mean = np.mean(hist)
    std = np.std(hist)
    stat_z = [(s-mean)/std for s in hist]
    
    # rows more than two standard deviations from the mean are candidate staff lines
    paranada = np.abs(stat_z) > 2
    indices = [i for i, x in enumerate(paranada) if x]
    
    if not indices:
        max_hist = max(hist)

        # fall back: keep rows whose count is within 2 of the maximum
        paranada = np.abs(np.asarray(hist) - max_hist) <= 2
        indices = [i for i, x in enumerate(paranada) if x]

        log_message = "WARNING: Failed to get outlier of " + filename
        helper.write_log('dataset', '4', log_message)
        # helper.show_plot(i, hist, "")
    
    group_paranada = list(helper.split_tol(indices, 2))
    paranada_index = [i[0] for i in group_paranada]

    if len(paranada_index) < 5:
        log_message = "FATAL ERROR: Paranada index of " + filename + " is not completely detected"
        helper.write_log('dataset', '1', log_message)
        print("Something error, please check dataset.log!")
    
    return paranada, group_paranada, paranada_index
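The z-score test above flags histogram rows that lie more than two standard deviations from the mean as candidate staff lines (paranada). A small self-contained illustration with a made-up row-projection histogram:

import numpy as np

# hypothetical row-projection histogram: mostly low counts plus five tall spikes
rng = np.random.default_rng(0)
hist = rng.integers(0, 5, size=40).astype(float)
hist[[5, 12, 19, 26, 33]] = 120

z = (hist - hist.mean()) / hist.std()   # same statistic as detect_paranada
print(np.flatnonzero(np.abs(z) > 2))    # -> [ 5 12 19 26 33 ]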
Example #6
def get_data(obj):
    hlp.write_log("### Getting data from " + obj + " API: ", eol=False)
    target_url = config.fibaro_url + obj
    r = call_fibaro_api(target_url)
    if r.status_code == 200:
        save_data(r.json(), obj)
    else:
        hlp.write_log("ERROR: error from " + obj + "API code: " + str(r.status_code))
Example #7
	def create_settings(self):
		path = os.path.join(getcwd(), 'settings.ini')
		try:
			# Write the initial [global] section
			with open(path, 'w') as file:
				file.write('[global]\n')
				file.write('setup = true\n')
		except OSError:
			helper.write_log('Unable to create the settings.ini file')
Example #8
    def export_codebooks(self, filename):
        # Write each codebook vector as one comma-separated line
        with open(filename + ".csv", "w") as f:
            for codebook in self.codebooks:
                f.write(", ".join(str(num) for num in codebook) + "\n")
        msg = "Export codebooks vector to " + filename + ".csv success"
        helper.write_log("dataset", '3', msg)
Example #9
def train(root, device, model, epochs, bs, lr):
    print('start training ...........')
    train_loader, val_loader = get_loader(root=root,
                                          batch_size=bs,
                                          shuffle=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          nesterov=True)
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, 50, 1)
    criterion = nn.CrossEntropyLoss()

    train_losses, train_acc = [], []
    val_losses, val_acc = [], []
    for epoch in range(epochs):
        train_epoch_loss, train_epoch_acc = fit(epoch,
                                                model,
                                                optimizer,
                                                criterion,
                                                device,
                                                train_loader,
                                                phase='training')
        val_epoch_loss, val_epoch_acc = fit(epoch,
                                            model,
                                            optimizer,
                                            criterion,
                                            device,
                                            val_loader,
                                            phase='validation')
        print('-----------------------------------------')

        if epoch == 0 or val_epoch_acc >= np.max(val_acc):
            torch.save(model.state_dict(), 'output/weight.pth')

        train_losses.append(train_epoch_loss)
        train_acc.append(train_epoch_acc)
        val_losses.append(val_epoch_loss)
        val_acc.append(val_epoch_acc)

        write_figures('output', train_losses, val_losses, train_acc, val_acc)
        write_log('output', epoch, train_epoch_loss, val_epoch_loss,
                  train_epoch_acc, val_epoch_acc)

        # advance the cosine-annealing-with-warm-restarts schedule once per epoch
        scheduler.step()
Example #10
def train():
    print('start training ...........')
    device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
    model = Model().to(device)
    batch_size = 2
    num_epochs = 100
    learning_rate = 0.1

    train_loader, val_loader = get_loader(batch_size=batch_size, shuffle=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate,
                          momentum=0.9,
                          nesterov=True)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    criterion = DiceLoss(smooth=1.)

    train_losses, val_losses = [], []
    for epoch in range(num_epochs):
        train_epoch_loss = fit(epoch,
                               model,
                               optimizer,
                               criterion,
                               device,
                               train_loader,
                               phase='training')
        val_epoch_loss = fit(epoch,
                             model,
                             optimizer,
                             criterion,
                             device,
                             val_loader,
                             phase='validation')
        print('-----------------------------------------')

        if epoch == 0 or val_epoch_loss <= np.min(val_losses):
            torch.save(model.state_dict(), 'output/weight.pth')

        train_losses.append(train_epoch_loss)
        val_losses.append(val_epoch_loss)

        write_figures('output', train_losses, val_losses)
        write_log('output', epoch, train_epoch_loss, val_epoch_loss)

        scheduler.step(val_epoch_loss)
Example #11
    def create_global_setting(self):
        """
        Create the settings.ini file in the program directory.
        registration = true  -> the user still has to be registered.
        registration = false -> the user is already registered.
        :return:
        """
        path = os.path.join(getcwd(), "settings.ini")
        try:
            with open(path, "w") as file:
                file.write("[global]\n")
                file.write("registration = true\n")
                file.write("version = {0}\n".format(self.version))
        except IOError as e:
            helper.write_log("I/O error({0}): {1}".format(e.errno, e.strerror), "error")
        except Exception:
            helper.write_log("Unexpected error: {0}".format(sys.exc_info()[0]), "error")
Example #12
    def import_codebooks(self, filename):
        dataset = list()
        with open(filename, 'r') as file:
            csv_reader = reader(file)
            for row in csv_reader:
                if not row:
                    continue
                dataset.append(row)

        for i in range(len(dataset[0]) - 1):
            self.str_column_to_float(dataset, i)

        # convert class column to integers
        self.str_column_to_int(dataset, -1)

        # one codebook per non-empty row that was read
        self.n_codebooks = len(dataset)
        self.codebooks = dataset

        msg = "Import codebooks vector from " + filename + " success"
        helper.write_log("dataset", '3', msg)
Example #13
def get_events(days=8):
    today_bod = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    today_eod = today_bod + timedelta(1)
    hlp.write_log("### Getting data from Events API")
    # Get last n days (including today)
    for d in range(days):
        start_dt = today_bod - timedelta(d)
        end_dt = today_eod - timedelta(d)
        start = str(int(datetime.timestamp(start_dt)))
        end = str(int(datetime.timestamp(end_dt)))

        target_url = config.fibaro_url + 'panels/event?' + 'from=' + start + "&" + "to=" + end
        r = call_fibaro_api(target_url)
        if r.status_code == 200:
            save_data(r.json(), start_dt.strftime('events_%Y-%m-%d-%H-%M-%S') + "_-_"
                      + end_dt.strftime('%Y-%m-%d-%H-%M-%S'))
            hlp.write_log("Downloaded events from date: " + start_dt.strftime('%Y-%m-%d') + ": " + target_url)
        else:
            hlp.write_log("ERROR: error from API code: " + str(r.status_code))
        time.sleep(0.1)
Example #14
def save_data(json_data, filename_part):
    filename_with_path = os.path.join(config.data_directory, filename_part + ".json")
    temp_filename_with_path = filename_with_path + ".temp"
    bak_filename_with_path = os.path.join(config.data_backup_directory, filename_part + ".json"
                                          + datetime.now().strftime('.%Y-%m-%d-%H-%M-%S.bak'))
    exists = os.path.isfile(filename_with_path)
    if exists:
        hlp.write_json(json_data, temp_filename_with_path)
        if os.path.getsize(filename_with_path) == os.path.getsize(temp_filename_with_path):
            os.remove(temp_filename_with_path)
            hlp.write_log("File already existed and is the same. Keeping old file: "
                          + filename_with_path, dtm_prefix=False)
        else:  # files are different
            # backup old file
            os.rename(filename_with_path, bak_filename_with_path)
            # rename temporary file
            os.rename(temp_filename_with_path, filename_with_path)
            hlp.write_log("File already exists but it's different. Old file was backed up as: " +
                          bak_filename_with_path, dtm_prefix=False)
    else:
        hlp.write_json(json_data, filename_with_path)
        hlp.write_log("File downloaded and stored as: " + filename_with_path, dtm_prefix=False)
Example #15
def main(seed=25):
    seed_everything(seed)
    device = torch.device('cuda:0')

    # arguments
    args = Args().parse()
    n_class = args.n_class

    img_path_train = args.img_path_train
    mask_path_train = args.mask_path_train
    img_path_val = args.img_path_val
    mask_path_val = args.mask_path_val

    model_path = os.path.join(args.model_path, args.task_name)  # save model
    log_path = args.log_path
    output_path = args.output_path

    if not os.path.exists(model_path):
        os.makedirs(model_path)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    task_name = args.task_name
    print(task_name)
    ###################################
    evaluation = args.evaluation
    test = evaluation and False
    print("evaluation:", evaluation, "test:", test)

    ###################################
    print("preparing datasets and dataloaders......")
    batch_size = args.batch_size
    num_workers = args.num_workers
    config = args.config

    data_time = AverageMeter("DataTime", ':3.3f')
    batch_time = AverageMeter("BatchTime", ':3.3f')

    dataset_train = DoiDataset(img_path_train,
                               config,
                               train=True,
                               root_mask=mask_path_train)
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)
    dataset_val = DoiDataset(img_path_val,
                             config,
                             train=True,
                             root_mask=mask_path_val)
    dataloader_val = DataLoader(dataset_val,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers)

    ###################################
    print("creating models......")
    model = DoiNet(n_class, config['min_descriptor'] + 6, 4)
    model = create_model_load_weights(model,
                                      evaluation=False,
                                      ckpt_path=args.ckpt_path)
    model.to(device)

    ###################################
    num_epochs = args.epochs
    learning_rate = args.lr

    optimizer = get_optimizer(model, learning_rate=learning_rate)
    scheduler = LR_Scheduler(args.scheduler, learning_rate, num_epochs,
                             len(dataloader_train))
    ##################################
    criterion_node = nn.CrossEntropyLoss()
    criterion_edge = nn.BCELoss()
    alpha = args.alpha

    writer = SummaryWriter(log_dir=log_path + task_name)
    f_log = open(log_path + task_name + ".log", 'w')
    #######################################
    trainer = Trainer(criterion_node,
                      criterion_edge,
                      optimizer,
                      n_class,
                      device,
                      alpha=alpha)
    evaluator = Evaluator(n_class, device)

    best_pred = 0.0
    print("start training......")
    log = task_name + '\n'
    for k, v in args.__dict__.items():
        log += str(k) + ' = ' + str(v) + '\n'
    print(log)
    f_log.write(log)
    f_log.flush()

    for epoch in range(num_epochs):
        optimizer.zero_grad()
        tbar = tqdm(dataloader_train)
        train_loss = 0
        train_loss_edge = 0
        train_loss_node = 0

        start_time = time.time()
        for i_batch, sample in enumerate(tbar):
            data_time.update(time.time() - start_time)

            if evaluation:  # evaluation pattern: no training
                break
            scheduler(optimizer, i_batch, epoch, best_pred)
            loss, loss_node, loss_edge = trainer.train(sample, model)
            train_loss += loss.item()
            train_loss_node += loss_node.item()
            train_loss_edge += loss_edge.item()
            train_scores_node, train_scores_edge = trainer.get_scores()

            batch_time.update(time.time() - start_time)
            start_time = time.time()

            if i_batch % 2 == 0:
                tbar.set_description(
                    'Train loss: %.4f (loss_node=%.4f  loss_edge=%.4f); F1 node: %.4f  F1 edge: %.4f; data time: %.2f; batch time: %.2f'
                    % (train_loss / (i_batch + 1), train_loss_node /
                       (i_batch + 1), train_loss_edge /
                       (i_batch + 1), train_scores_node["macro_f1"],
                       train_scores_edge["macro_f1"], data_time.avg,
                       batch_time.avg))

        trainer.reset_metrics()
        data_time.reset()
        batch_time.reset()

        if epoch % 1 == 0:
            with torch.no_grad():
                model.eval()
                print("evaluating...")

                tbar = tqdm(dataloader_val)
                start_time = time.time()
                for i_batch, sample in enumerate(tbar):
                    data_time.update(time.time() - start_time)
                    pred_node, pred_edge = evaluator.eval(sample, model)
                    val_scores_node, val_scores_edge = evaluator.get_scores()

                    batch_time.update(time.time() - start_time)
                    tbar.set_description(
                        'F1 node: %.4f  F1 edge: %.4f; data time: %.2f; batch time: %.2f'
                        % (val_scores_node["macro_f1"],
                           val_scores_edge["macro_f1"], data_time.avg,
                           batch_time.avg))
                    start_time = time.time()

            data_time.reset()
            batch_time.reset()
            val_scores_node, val_scores_edge = evaluator.get_scores()
            evaluator.reset_metrics()

            best_pred = save_model(model, model_path, val_scores_node,
                                   val_scores_edge, alpha, task_name, epoch,
                                   best_pred)
            write_log(f_log, train_scores_node, train_scores_edge,
                      val_scores_node, val_scores_edge, epoch, num_epochs)
            write_summaryWriter(writer, train_loss / len(dataloader_train),
                                optimizer, train_scores_node,
                                train_scores_edge, val_scores_node,
                                val_scores_edge, epoch)

    f_log.close()
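data_time and batch_time above are AverageMeter instances; the class itself is not shown. A sketch matching the usual PyTorch-examples implementation (the name/format constructor arguments follow the calls above):

class AverageMeter:
    # Track the latest value and the running average of a measurement.
    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.reset()

    def reset(self):
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count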
Example #16
import fibaro_api_functions as api
import helper as hlp

hlp.write_log("##################### New process started")
api.get_data("sections")
api.get_data("rooms")
api.get_data("scenes")
api.get_data("devices")
api.get_events()
hlp.write_log("##################### Update process completed")
Example #17
	def __init__(self):
		# Check whether the settings.ini file exists
		if not os.path.isfile('settings.ini'):
			helper.write_log('settings.ini file is missing')
			# Run the procedure that creates the initial settings file
			self.create_settings()
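Once settings.ini has been created by the snippets above, it can be read back with the standard configparser module; a short sketch using the keys written in Examples #7 and #11:

from configparser import ConfigParser

config = ConfigParser()
config.read('settings.ini')

needs_setup = config.getboolean('global', 'setup', fallback=True)
registration = config.getboolean('global', 'registration', fallback=True)
version = config.get('global', 'version', fallback='0.1')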