Example #1
def test_three():
    config = utils.load_config()
    dermis = inputs.SkinData(config['data_dir'], 'dermis')
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        image_ph = tf.placeholder(dtype=tf.float32, shape=(None, None, 3))
        images = tf.expand_dims(image_ph, axis=0)
        net = FCN(images, net_params=config['net_params'])

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess,
                          tf.train.latest_checkpoint(config['train_dir']))
            logger.info('Model at step-%d restored successfully!' %
                        sess.run(global_step))
            utils.create_if_not_exists(config['save_path'])
            for i, (image,
                    label) in enumerate(zip(dermis.images, dermis.labels)):
                if i % 5 == 0:
                    logger.info('Processing image %d...' % i)

                prep_image = image_prep_for_test(image)
                pred = np.squeeze(
                    sess.run(net.outputs, feed_dict={image_ph: prep_image}))
                path = os.path.join(config['save_path'],
                                    dermis.listing[i].split('/')[-1] + '.jpg')
                save_all(image, label, pred, path)
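All of the examples on this page call a small create_if_not_exists helper from their respective repositories. Its implementation is not shown here; a minimal sketch, assuming it does nothing more than create a missing directory tree, would be:

import os


def create_if_not_exists(path):
    # Hypothetical helper: create the directory (and its parents) only when it is missing.
    if not os.path.exists(path):
        os.makedirs(path)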
Example #2
def get_train_val(train: datasets,
                  test_transform: transforms,
                  dataset: str,
                  val_perc: float = 0.1):
    """
    Extract a fraction val_perc of the training set to serve as the validation set.
    :param train: training dataset
    :param test_transform: transformation of the test dataset
    :param dataset: dataset name
    :param val_perc: fraction of the training set to be extracted
    :return: the training set and the validation set
    """
    dataset_length = train.data.shape[0]
    directory = 'datasets/val_permutations/'
    create_if_not_exists(directory)
    file_name = dataset + '.pt'
    if os.path.exists(directory + file_name):
        perm = torch.load(directory + file_name)
    else:
        perm = torch.randperm(dataset_length)
        torch.save(perm, directory + file_name)
    train.data = train.data[perm]
    train.targets = np.array(train.targets)[perm]
    test_dataset = ValidationDataset(
        train.data[:int(val_perc * dataset_length)],
        train.targets[:int(val_perc * dataset_length)],
        transform=test_transform)
    train.data = train.data[int(val_perc * dataset_length):]
    train.targets = train.targets[int(val_perc * dataset_length):]

    return train, test_dataset
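A hedged usage sketch of get_train_val (the MNIST dataset and transform below are illustrative placeholders, not part of the snippet above); the permutation is cached under datasets/val_permutations/ so the split stays identical across runs:

from torchvision import datasets, transforms

test_transform = transforms.ToTensor()
train = datasets.MNIST('data/', train=True, download=True,
                       transform=transforms.ToTensor())
# Returns the reduced training set and a ValidationDataset holding 10% of it.
train, val_set = get_train_val(train, test_transform, dataset='mnist', val_perc=0.1)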
Example #3
def test_five():
    config = utils.load_config()
    dermis = inputs.SkinData(config['data_dir'], 'dermis')
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        image_ph = tf.placeholder(dtype=tf.float32, shape=(None, None, 3))
        images = tf.expand_dims(image_ph, axis=0)
        net = FCN(images, net_params=config['net_params'])

        h, w = tf.shape(image_ph)[0], tf.shape(image_ph)[1]
        upscore = tf.image.resize_images(net.endpoints['conv4'], size=(h, w))
        prob_one = tf.nn.sigmoid(upscore)
        prob_zero = 1 - prob_one
        probs = tf.concat([prob_zero, prob_one], axis=3)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess,
                          tf.train.latest_checkpoint(config['train_dir']))
            logger.info('Model at step-%d restored successfully!' %
                        sess.run(global_step))
            utils.create_if_not_exists(config['save_path'])

            result_before = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}
            result_after = result_before.copy()

            def update_dict(d, to_update):
                for key in to_update:
                    d[key] += to_update[key]

            _, x_test, _, y_test = train_test_split(
                dermis.images,
                dermis.labels,
                random_state=config['split_seed'])
            for i, (image, label) in enumerate(zip(x_test, y_test)):
                prep_image = image_prep_for_test(image)
                probs_o = np.squeeze(
                    sess.run(probs, feed_dict={image_ph: prep_image}))
                cnn_result = np.argmax(probs_o, axis=2)
                cnn_crf_result = crf_post_process(image, probs_o)

                result_bi = count_many(cnn_result, label)
                result_ai = count_many(cnn_crf_result, label)
                update_dict(result_before, result_bi)
                update_dict(result_after, result_ai)
                result_bi.update(metric_many_from_counter(result_bi))
                result_ai.update(metric_many_from_counter(result_ai))

                if i % 5 == 0:
                    logger.info(
                        'Image-{}\nresult before\n{}\nresult after\n{}\n'.
                        format(i, result_bi, result_ai))

            result_before.update(metric_many_from_counter(result_before))
            result_after.update(metric_many_from_counter(result_after))
            logger.info('\nresult before\n{}\nresult after\n{}\n'.format(
                result_before, result_after))
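count_many and metric_many_from_counter come from the same project and are not shown here. A plausible sketch, assuming they accumulate binary confusion-matrix counts per image and derive pixel-wise metrics from those counts:

import numpy as np


def count_many(pred, label):
    # Hypothetical helper: TP/TN/FP/FN counts for binary segmentation masks.
    pred = pred.astype(bool)
    label = label.astype(bool)
    return {'TP': int(np.sum(pred & label)),
            'TN': int(np.sum(~pred & ~label)),
            'FP': int(np.sum(pred & ~label)),
            'FN': int(np.sum(~pred & label))}


def metric_many_from_counter(counts):
    # Hypothetical helper: derived metrics; eps guards against division by zero.
    eps = 1e-8
    total = counts['TP'] + counts['TN'] + counts['FP'] + counts['FN']
    return {'accuracy': (counts['TP'] + counts['TN']) / (total + eps),
            'sensitivity': counts['TP'] / (counts['TP'] + counts['FN'] + eps),
            'specificity': counts['TN'] / (counts['TN'] + counts['FP'] + eps)}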
Example #4
def download_forecast_data(target_date, force=False):
    """Download forecast data for the given target date.

    If force is False, data is only downloaded if it doesn't already exist.
    """

    start_date = end_date = target_date
    target_date_formatted = start_date.strftime(DATE_FORMAT)
    folder = os.path.join(settings.DATA_FOLDER, 'MyOcean', 'Forecast')
    create_if_not_exists(folder)

    datasets = [
        {   # chlorophyll, nitrate, phosphate, oxygen...
            'service': 'http://purl.org/myocean/ontology/service/database#MEDSEA_ANALYSIS_FORECAST_BIO_006_006-TDS',
            'product': 'myov04-med-ogs-bio-an-fc',
            'time': '12:00:00',
        },
        {   # salinity
            'service': 'http://purl.org/myocean/ontology/service/database#MEDSEA_ANALYSIS_FORECAST_PHYS_006_001_a-TDS',
            'product': 'myov05-med-ingv-sal-an-fc-dm',
            'time': '00:00:00'
        },
        {   # temperature
            'service': 'http://purl.org/myocean/ontology/service/database#MEDSEA_ANALYSIS_FORECAST_PHYS_006_001_a-TDS',
            'product': 'myov05-med-ingv-tem-an-fc-dm',
            'time': '00:00:00'
        },
        {   # currents (zonal and meridional velocity)
            'service': 'http://purl.org/myocean/ontology/service/database#MEDSEA_ANALYSIS_FORECAST_PHYS_006_001_a-TDS',
            'product': 'myov05-med-ingv-cur-an-fc-dm',
            'time': '00:00:00',
            'variables': ['vozocrtx', 'vomecrty']
        }
    ]

    for dataset in datasets:
        filename = '%s-%s.nc' % (dataset['product'], target_date_formatted)
        if not exists(filename, folder) or force:
            try:
                download_myocean_data(
                    service=dataset['service'],
                    product=dataset['product'],
                    variables=dataset.get('variables'),
                    time_start='%s %s' % (start_date, dataset['time']),
                    time_end='%s %s' % (end_date, dataset['time']),
                    folder=folder,
                    filename=filename)
            except MaxTriesExceededException:
                logger.error("Download of file {} from MyOcean failed.".format(
                    filename))

        else:
            print(
                'File %s already exists, skipping download... (use force=True to override).'
                % filename)
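A hedged usage sketch; DATE_FORMAT, settings and the download helpers are assumed to be configured in the surrounding module:

import datetime

# Fetch tomorrow's forecast files; nothing is re-downloaded unless force=True.
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
download_forecast_data(tomorrow)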
Example #5
    def write(self, args: Dict[str, Any]) -> None:
        """
        writes out the logged value along with its arguments.
        :param args: the namespace of the current experiment
        """
        for cc in useless_args:
            if cc in args:
                del args[cc]

        columns = list(args.keys())

        new_cols = []
        for i, acc in enumerate(self.accs):
            args['task' + str(i + 1)] = acc
            new_cols.append('task' + str(i + 1))

        columns = new_cols + columns

        create_if_not_exists(base_path() + "results/" + self.setting)
        create_if_not_exists(base_path() + "results/" + self.setting + "/" +
                             self.dataset)
        create_if_not_exists(base_path() + "results/" + self.setting + "/" +
                             self.dataset + "/" + self.model)

        write_headers = False
        path = base_path() + "results/" + self.setting + "/" + self.dataset\
               + "/" + self.model + "/mean_accs.csv"
        if not os.path.exists(path):
            write_headers = True
        with open(path, 'a') as tmp:
            writer = csv.DictWriter(tmp, fieldnames=columns)
            if write_headers:
                writer.writeheader()
            writer.writerow(args)

        if self.setting == 'class-il':
            create_if_not_exists(base_path() + "results/task-il/" +
                                 self.dataset)
            create_if_not_exists(base_path() + "results/task-il/" +
                                 self.dataset + "/" + self.model)

            for i, acc in enumerate(self.accs_mask_classes):
                args['task' + str(i + 1)] = acc
            write_headers = False
            path = base_path() + "results/task-il" + "/" + self.dataset + "/"\
                   + self.model + "/mean_accs.csv"
            if not os.path.exists(path):
                write_headers = True
            with open(path, 'a') as tmp:
                writer = csv.DictWriter(tmp, fieldnames=columns)
                if write_headers:
                    writer.writeheader()
                writer.writerow(args)
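The results path above is assembled by string concatenation against base_path(). An equivalent construction with os.path.join, shown only as a sketch of the directory layout (results/<setting>/<dataset>/<model>/mean_accs.csv), would be:

import os


def mean_accs_path(base, setting, dataset, model):
    # Hypothetical equivalent of the concatenated path used in write().
    return os.path.join(base, 'results', setting, dataset, model, 'mean_accs.csv')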
Example #7
def test_four():
    config = utils.load_config()
    dermis = inputs.SkinData(config['data_dir'], 'dermis')
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        image_ph = tf.placeholder(dtype=tf.float32, shape=(None, None, 3))
        images = tf.expand_dims(image_ph, axis=0)
        net = FCN(images, net_params=config['net_params'])

        h, w = tf.shape(image_ph)[0], tf.shape(image_ph)[1]
        upscore = tf.image.resize_images(net.endpoints['conv4'], size=(h, w))
        prob_one = tf.nn.sigmoid(upscore)
        prob_zero = 1 - prob_one
        probs = tf.concat([prob_zero, prob_one], axis=3)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess,
                          tf.train.latest_checkpoint(config['train_dir']))
            logger.info('Model at step-%d restored successfully!' %
                        sess.run(global_step))
            utils.create_if_not_exists(config['save_path'])
            total_count = 0
            true_count = 0
            for i, (image,
                    label) in enumerate(zip(dermis.images, dermis.labels)):
                prep_image = image_prep_for_test(image)
                probs_o = np.squeeze(
                    sess.run(probs, feed_dict={image_ph: prep_image}))
                cnn_result = np.argmax(probs_o, axis=2)
                accuracy_before, _, _ = metric_accuracy(cnn_result, label)
                cnn_crf_result = crf_post_process(image, probs_o)
                accuracy_i, true_count_i, total_count_i = metric_accuracy(
                    cnn_crf_result, label)
                true_count += true_count_i
                total_count += total_count_i

                ss = 'DOWN' if accuracy_before > accuracy_i else 'UP'
                path = os.path.join(config['save_path'],
                                    dermis.listing[i].split('/')[-1])
                path = '{:<6} ({:.3f}) ({:.3f}) {}.jpg'.format(
                    path, accuracy_before, accuracy_i, ss)
                save_all_two(image, label, cnn_result, cnn_crf_result, path)

                if i % 5 == 0:
                    logger.info(
                        'Image-%d accuracy before(%.3f) after(%.3f) %s' %
                        (i, accuracy_before, accuracy_i, ss))

            accuracy = true_count * 1.0 / total_count
            logger.info('Accuracy after crf: %.3f' % accuracy)
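crf_post_process is defined elsewhere in the repository. A minimal sketch of a fully connected CRF refinement step, assuming the pydensecrf package and the two-channel probability map produced above; the kernel parameters are illustrative, not the project's values:

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


def crf_post_process(image, probs, n_iters=5):
    # probs has shape (H, W, 2); DenseCRF2D expects (n_labels, H * W) unaries.
    h, w, n_labels = probs.shape
    d = dcrf.DenseCRF2D(w, h, n_labels)
    unary = unary_from_softmax(np.ascontiguousarray(probs.transpose(2, 0, 1)))
    d.setUnaryEnergy(unary)
    # Smoothness kernel plus an appearance kernel conditioned on the RGB image.
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(image.astype(np.uint8)),
                           compat=10)
    q = np.array(d.inference(n_iters))
    return np.argmax(q, axis=0).reshape(h, w)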
Example #10
def preprocess_forecast_data(target_date, force=False):
    """Preprocess forecast environmental data from MyOcean using R.

    If force is False, preprocessing only runs if the output file doesn't already exist.
    """
    filename = 'Forecast_Env-%s.csv' % target_date.strftime(DATE_FORMAT)
    folder = os.path.join(settings.DATA_FOLDER, 'MyOcean', 'Forecast')
    create_if_not_exists(folder)

    if not exists(filename, folder) or force:
        os.chdir(os.path.join(settings.DATA_FOLDER))
        with open(os.path.join(BASE_DIR, 'R', 'ExtractData_MyOcean.R'),
                  'r') as inputfile:
            call([
                "R", "--no-save", "--args",
                target_date.strftime(DATE_FORMAT)
            ],
                 stdin=inputfile)
    else:
        print(
            '\nFile %s already exists, skipping preprocessing... (use force=True to override).'
            % filename)
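exists(filename, folder) is another tiny project helper shared by the download, preprocessing and prediction functions; a plausible sketch, assuming it only checks whether the expected output file is already in the folder:

import os


def exists(filename, folder):
    # Hypothetical helper: True when the output file already sits in the target folder.
    return os.path.isfile(os.path.join(folder, filename))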
Example #11
def predict_forecast(target_date, force=False):
    """Predict the presence of medusae using a previously calibrated model.

    If force is False, the prediction only runs if the output file doesn't already exist.
    """
    for a in settings.TUNEZ_DATA:
        filename = '{}EF-{}.csv'.format(a, target_date.strftime(DATE_FORMAT))
        folder = os.path.join(settings.DATA_FOLDER, 'Projections')
        create_if_not_exists(folder)
        if not exists(filename, folder) or force:
            os.chdir(settings.DATA_FOLDER)
            with open(
                    os.path.join(BASE_DIR, 'R',
                                 'Tunisia_MedJellyRisk_{}.R'.format(a)),
                    'r') as inputfile:
                call([
                    "R", "--no-save", "--args",
                    target_date.strftime(DATE_FORMAT)
                ],
                     stdin=inputfile)
        else:
            print(
                '\nFile %s already exists, skipping prediction... (use force=True to override).'
                % filename)
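Taken together, the three MyOcean functions form a small daily pipeline; a hedged end-to-end sketch (the target date is arbitrary, and settings and the R scripts are assumed to be configured):

import datetime

target = datetime.date.today() + datetime.timedelta(days=1)
download_forecast_data(target)      # fetch the NetCDF forecast files
preprocess_forecast_data(target)    # extract environmental variables via R
predict_forecast(target)            # run the calibrated medusae-presence model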
Example #12
def update_backup(commands):
    if (commands.virtual_drive):
        # If drive V is already taken, mount the volume at the specified letter
        virtual_drive = commands.virtual_drive
    else:
        virtual_drive = DEFAULT_VIRTUAL_DRIVE

    if (not virtual_drive.endswith(':')):
        virtual_drive = virtual_drive + ':'

    if (commands.name):
        volume = utils.find_path_to_volume_by_backup_name(
            commands.name, program_directory)

    elif (commands.volume):
        volume = commands.volume

    else:
        return Font.YELLOW + '[!] Enter the backup name or the path to the backup volume to update'

    if (commands.password):
        if (len(commands.password) < MIN_PASSWORD_LENGTH):
            return Font.YELLOW + '[!] The password is too short. Minimum 25 characters'
    else:
        return Font.YELLOW + '[!] Password not found'

    if (utils.volume_is_mount(virtual_drive)):
        print(Font.YELLOW +
              '[!] The volume is already mounted or a drive with this letter already exists')
    else:
        # Mount the volume
        if (not utils.mount_veracrypt_volume(DEFAULT_VERACRYPT_PATH, volume,
                                             commands.password,
                                             virtual_drive)):
            return Font.YELLOW + '[!] An error occurred while mounting the volume'

    # Check the mounted volume for the required backup files
    if (not utils.is_backup_drive(virtual_drive)):
        return Font.YELLOW + '[i] The drive is not a backup'

    print(Font.YELLOW + '[i] Loading old metadata...')
    try:
        backup_metadata = utils.load_metadata_from_json(
            os.path.join(virtual_drive, 'metadata.json'))
    except utils.CastomException as exc:
        return Font.YELLOW + str(exc)
    except Exception as exc:
        return Font.YELLOW + '[!] An unexpected error occurred: %s' % exc

    metadata = backup_metadata['metadata']
    backup_name = backup_metadata['backup_name']
    backup_directory = backup_metadata['directory']
    compression_level = backup_metadata['compression_level']
    amount_files_in_backup = backup_metadata['amount_files_in_backup']

    last_filelist = utils.read_file(os.path.join(
        virtual_drive, 'filelist.txt'))  # Load the old file list

    if (commands.blacklist):
        blacklist = utils.read_file(commands.blacklist)
    else:
        blacklist = utils.read_file(
            os.path.join(virtual_drive,
                         'blacklist.txt'))  # Load the old blacklist

    if (commands.recursion_level):
        max_recursion_level = commands.recursion_level
    else:
        max_recursion_level = backup_metadata['recursion_level']
    ''' LOAD OLD DATA AND COLLECT NEW DATA, COMPARE THEM, TRACK THE TYPE OF CHANGES AND SORT THEM INTO THE CORRESPONDING LISTS'''
    print(Font.YELLOW + '[i] Starting data collection...')
    # Get the list of files located in the backup folder
    new_filelist = collect_backup_files(backup_directory, blacklist,
                                        max_recursion_level)
    print(Font.YELLOW + '[i] Files found: %i' % len(new_filelist))

    # Compare the file lists to find changes
    deleted_files, appended_files = utils.cmp_lists(last_filelist,
                                                    new_filelist)
    # WORKAROUND: remove our dummy folder from the deleted list, since it lives in the program
    # directory rather than in the directory being archived, so the script would otherwise treat it as a deleted file
    if (IGNORED_EMPTY_DIR in deleted_files):
        deleted_files.remove(IGNORED_EMPTY_DIR)

    # Track the type of each change and also update the list of deleted files
    changes_list = utils.identify_changes(metadata, appended_files,
                                          deleted_files)

    # Add metadata for the new files
    for change in changes_list:
        parent, filename, status = change
        if (not parent and not status):
            metadata.update({filename: get_information(filename)})

    # Check only files that already exist in the archive for updates
    updated_files = []
    for filename in metadata:
        if (os.path.exists(filename)):
            if (os.stat(filename).st_mtime > metadata[filename]['st_mtime']):
                updated_files.append(filename)
                metadata.update({filename: get_information(filename)})
                if (commands.verbosity):
                    print(Font.GREEN + '[>] File %s needs to be updated' % filename)

    for data in changes_list:
        if (all(data)):
            updated_files.append(data[0])
            if (commands.verbosity):
                print(Font.GREEN + '[>] File %s %s in %s' %
                      (data[0], CHANGES_STATUSES.get(data[2], 'Unknown'),
                       data[1]))

    for file in deleted_files:
        if (commands.verbosity):
            print(Font.GREEN + '[>] File %s was deleted!' % file)
    '''END OF THIS BLOCK'''

    # Search for file copies, moves and renames to create dependencies for size reduction
    appended_files = optimize_metadata(changes_list, metadata)

    amount_appended_files = len(appended_files)
    amount_updated_files = len(updated_files)
    amount_deleted_files = len(deleted_files)

    amount_changes = amount_updated_files + amount_appended_files + amount_deleted_files

    if (amount_changes >= MIN_AMOUNT_CHANGES_TO_UPDATE or commands.force):

        if (amount_appended_files > 0):
            print(Font.CYAN +
                  '[i] Files added: %i' % amount_appended_files)

        if (amount_updated_files > 0):
            print(Font.CYAN +
                  '[i] Files updated: %i' % amount_updated_files)

        if (amount_deleted_files):
            print(Font.CYAN +
                  '[i] Files deleted: %i' % amount_deleted_files)

        # Create the backup folder
        utils.create_if_not_exists(os.path.join(virtual_drive, 'updates'))
        # If we are updating the backup, save the old data and the change list so the changes can be rolled back
        updates_directory = os.path.join(
            virtual_drive, 'updates',
            time.ctime(time.time()).replace(':', '-'))
        # Make sure the folder for storing the old metadata exists
        utils.create_if_not_exists(updates_directory)
        # Save the old metadata. Why? Who knows :D
        try:
            utils.dump_metadata_to_json(
                os.path.join(updates_directory, 'metadata.json'),
                backup_metadata)
            utils.dump_metadata_to_txt(
                os.path.join(updates_directory, 'filelist.txt'), last_filelist)
            utils.dump_metadata_to_json(
                os.path.join(updates_directory, 'changes.json'), changes_list)
        except utils.CastomException as exc:
            print(Font.YELLOW + str(exc))

        # Update files in the archive
        if (updated_files):
            asigned_updated_files = utils.asign_unf(updated_files, metadata)
            utils.update_files(virtual_drive, backup_name, compression_level,
                               asigned_updated_files, Font)
        # Add new files to the archive
        if (appended_files):
            asigned_appended_files = utils.asign_unf(appended_files, metadata)
            utils.compress_files(virtual_drive, backup_name, compression_level,
                                 asigned_appended_files, Font)
        # Mark deleted files in the metadata
        if (deleted_files):
            utils.set_flags_is_deleted_files(metadata, deleted_files)

        backup_metadata.update({
            'last_update': time.ctime(time.time()),
            'amount_appended_filse': amount_appended_files,
            'amount_updated_files': amount_updated_files,
            'amount_deleted_files': amount_deleted_files,
            'amount_files_in_backup':
                amount_files_in_backup + amount_appended_files - amount_deleted_files,
            'metadata': metadata,
        })

        update_backup_metadata(virtual_drive, backup_metadata, new_filelist,
                               blacklist)
        auto_dismount_veracrypt_volume_or_open_backup_drive(
            commands, virtual_drive)

        return Font.CYAN + '[>] Backup updated successfully!'

    else:
        return Font.YELLOW + '[!] The backup does not need updating'
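utils.cmp_lists drives the change detection above; a minimal sketch, assuming it simply reports which paths disappeared from the old file list and which were added in the new one:

def cmp_lists(old_filelist, new_filelist):
    # Hypothetical helper: files missing from the new listing and files that appeared in it.
    deleted_files = [f for f in old_filelist if f not in new_filelist]
    appended_files = [f for f in new_filelist if f not in old_filelist]
    return deleted_files, appended_files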
Example #13
# and retrieve some files, it is better to first start a session, specifying the backup name or the path to the volume and the password
# session -p 1234567890123456788901234567890ABCDEF -v "volume.hc"
# otherwise you will have to enter this data for every action
session = {
    '--password': None,
    '--volume': None,
    '--name': None,
    '--virtual_drive': None,
}

# Path to the program's root folder; used to store a file with the full path to the backup file and its name
# Needed so that when updating a backup you only have to give its name rather than the path, EXAMPLE: -update -n BACKUP_1
program_directory = os.path.dirname(sys.argv[0]).replace('/', '\\')
#
IGNORED_EMPTY_DIR = os.path.join(program_directory, EMPTY_DIR)
utils.create_if_not_exists(IGNORED_EMPTY_DIR)

#
Font = colorama.Fore if STYLE == 0 else colorama.Back


# If the --o/-open flag is set, open the backup in Explorer and prevent automatic dismounting of the volume
def auto_dismount_veracrypt_volume_or_open_backup_drive(
        commands, virtual_drive):
    if (commands.open):
        print(Font.YELLOW + '[i] Opening the backup folder...')
        utils.open_backup_drive(virtual_drive)
    else:
        print(Font.YELLOW + '[i] Starting to dismount the volume...')
        utils.dismount_veracrypt_volume(DEFAULT_VERACRYPT_PATH, virtual_drive)