Code Example #1
File: semac_back.py  Project: ThiagoLeal11/semac-bot
    def revert(self):
        self.log('Reverting to the previous version')

        os.chdir(rootpath)
        if not os.path.exists(rootpath + '/reverted'):
            self.run_command('mkdir reverted')

        # Read the current version (stored in the 'actual_version' file)
        version = read_from_file('actual_version')

        # Move the current version and its virtualenv to the reverted/ folder
        self.run_command(f'mv current {version}')
        self.run_command(f'mv venv venv{version}')
        self.run_command(f'mv {version} reverted')
        self.run_command(f'mv venv{version} reverted')

        # Get previous version
        version = calc_previous_version(version)

        # Swap version
        self.run_command(f'mv {version} current')
        self.run_command(f'mv venv{version} venv')

        # Record the new current version
        write_on_file('actual_version', version)

        # Restart the server
        self.log('Restarting the server')
        self.run_command('sudo systemctl restart apache2')

        # Everything worked
        self.log(get_finished_fallback())
        self.log(get_rollback_fallback(version))
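
Both revert and deploy rely on small helpers (read_from_file, write_on_file, calc_previous_version, calc_next_version) whose definitions are not part of these snippets. A minimal sketch of what they might look like, assuming versions are plain integers stored as text; the real semac-bot implementations may differ:

# Hypothetical sketches; the actual semac-bot helpers are not shown
# in these snippets and may differ.

def read_from_file(filename):
    """Read and return the stripped contents of a small state file."""
    with open(filename) as f:
        return f.read().strip()


def write_on_file(filename, content):
    """Overwrite a state file with the given content."""
    with open(filename, 'w') as f:
        f.write(str(content))


def calc_next_version(version):
    """Return the next version number, e.g. '3' -> '4' (assumes integer versions)."""
    return str(int(version) + 1)


def calc_previous_version(version):
    """Return the previous version number, e.g. '4' -> '3'."""
    return str(int(version) - 1)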
Code Example #2
File: semac_back.py  Project: ThiagoLeal11/semac-bot
    def deploy(self):
        # Create the new folder
        os.chdir(rootpath)
        self.log('Create new folder')

        if os.path.exists(rootpath + '/new'):
            os.system('sudo rm -r new')

        self.run_command('mkdir new')
        os.chdir(rootpath + '/new')

        try:
            # Clone repository
            self.log('Clone repo')
            self.run_command(f'git clone https://{USER}:{PASS}@{VS_URL} .')

            # Copy variables to new folder
            os.chdir(rootpath)
            self.log('Copy variables.py')
            self.run_command('cp variables.py new/')

            # Create new env
            os.chdir(rootpath + '/new')
            self.log('Create new virtual env')
            self.run_command('virtualenv venv --python=python3.6')

            # Install dependencies
            self.log('Install dependencies')
            self.run_command('venv/bin/pip install -r requirements.txt')

            # Run migrations
            self.log('Run migrations')
            self.run_command('venv/bin/python3 manage.py migrate')

            # Temporarily rename venv and move it up one directory
            os.chdir(rootpath + '/new')
            self.run_command('mv venv new_venv')
            self.run_command('mv new_venv ./..')

        except Exception:
            # Delete new folder
            os.chdir(rootpath)
            self.log('Reverting...')
            os.system('sudo rm -r new')
            os.system('sudo rm -r new_venv')

            return None

        # All right, swap versions
        self.log('Updating versions')

        # Read the last version number to archive the current folder under
        os.chdir(rootpath)
        version = read_from_file('last_version')

        # Rename folders
        self.run_command(f'mv current {version}')
        self.run_command('mv new current')
        self.run_command(f'mv venv venv{version}')
        self.run_command('mv new_venv venv')

        # Get next version
        version = calc_next_version(version)

        # Update version control file
        write_on_file('actual_version', version)
        write_on_file('last_version', version)

        # Restart server
        self.log('Restarting the server')
        self.run_command('sudo systemctl restart apache2')

        # Everything worked
        self.log(get_finished_fallback())
        self.log(get_update_fallback(version))
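
Note that the try/except around the clone and build steps only protects the deploy if run_command raises when a command fails. A minimal sketch of a wrapper with that behavior (an assumption; semac-bot's actual wrapper is not shown in these snippets):

import subprocess

def run_command(self, command):
    """Run a shell command, raising CalledProcessError on failure.

    Hypothetical sketch: the real wrapper may capture output or log
    differently.
    """
    self.log(f'$ {command}')
    subprocess.run(command, shell=True, check=True)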
Code Example #3
File: main.py  Project: valeriomieuli/LLwE_bayesian
}
expert_epochs = 25
########################################################################################

features_mean = np.load(os.path.join(os.getcwd(),
                                     str(data_size) + '_mean.npy'))
features_std = np.load(os.path.join(os.getcwd(), str(data_size) + '_std.npy'))
features_extractor = utils_models.build_features_extractor(
    features_extractor_name, data_shape)

for task_id, dataset in enumerate(datasets):
    if task_id == 0:
        open_mode = 'w'
    else:
        open_mode = 'a'
    utils.write_on_file(result_filename, open_mode,
                        "[%s] Starting new task..." % dataset.upper())
    '''X_train, y_train, X_valid, y_valid = utils_data.load_data(data_dir=data_dir, dataset=dataset, data_size=data_size,
                                                          phase='train_valid', valid_split=valid_split, seed=seed)'''
    X_train, y_train, X_valid, y_valid, _, _ = utils_data.load_data(
        data_dir=data_dir,
        dataset=dataset,
        data_size=data_size,
        valid_split=valid_split,
        test_split=test_split,
        seed=seed)
    ############################################ TRAINING ############################################
    utils.write_on_file(
        result_filename, 'a',
        "[%s] Starting autoencoder's cross-validation..." % dataset.upper())
    autoencoder = utils.autoencoder_cross_validation(
        features_extractor, batch_size, X_train, X_valid, features_mean,
Code Example #4
File: semac_front.py  Project: ThiagoLeal11/semac-bot
    def deploy(self):
        # Create the new folder
        os.chdir(rootpath)
        self.log('Create new folder')

        if os.path.exists(rootpath + '/new'):
            os.system('sudo rm -r new')

        self.run_command('mkdir new')
        os.chdir(rootpath + '/new')

        try:
            # Clone repository
            self.log('Clone repo')
            self.run_command(f'git clone https://{USER}:{PASS}@{VS_URL} .')

            # Install dependencies and compile
            self.log('Install dependencies')
            self.run_command('npm install')

            # Compiling js
            self.log('Compile js')
            self.run_command('polymer build')

        except Exception:
            # Delete new folder
            os.chdir(rootpath)
            self.log('Reverting...')
            os.system('sudo rm -r new')

            return None

        # Move folder to destination
        self.log('Moving folder to /var/www/html/Site')
        self.run_command(f'mv build/es5-bundled {serverpath}')

        # Clean folder
        os.chdir(rootpath)
        self.log('Clean the new build folder')
        os.system('sudo rm -r new')

        # Go to destination path
        os.chdir(serverpath)

        # Read the last version number to archive the current folder under
        version = read_from_file('last_version')

        # Rename folders
        self.log('Updating versions')
        self.run_command(f'mv current {version}')
        self.run_command('mv es5-bundled current')

        # Get next version
        version = calc_next_version(version)

        # Update version control file
        write_on_file('actual_version', version)
        write_on_file('last_version', version)

        # Restart server
        self.log('Restarting the server')
        self.run_command('sudo systemctl restart apache2')

        # Everything worked
        self.log(get_finished_fallback())
        self.log(get_update_fallback(version))
Code Example #5
        'name': soup.find(itemprop='name').text,
        'price': float(soup.find(itemprop='price').attrs['content'])
    }

    print(f'{i}, {product}')


def get_pool(n_th: int):
    """Retorna um número n de Threads."""
    return [
        Worker(target=get_product_info, queue=queue, name=f'Worker {n}')
        for n in range(n_th)
    ]


if __name__ == '__main__':
    start = time()

    get_urls('https://www.dafiti.com.br/calcados-masculinos/botas/')

    #print(queue.queue)
    thrs = get_pool(8)

    # Start all workers, then wait for them to finish.
    for th in thrs:
        th.start()

    for th in thrs:
        th.join()

    write_on_file(os.path.basename(sys.argv[0]), time() - start)
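
The Worker class used by get_pool is not shown. From the call site it is a thread that takes a target function and the shared queue; a minimal sketch, assuming each worker drains URLs from the queue until it is empty and that get_product_info takes a single URL argument (the real class may differ):

import queue as queue_module
import threading

class Worker(threading.Thread):
    """Hypothetical sketch of the scraper's worker thread: repeatedly
    pulls a URL off the shared queue and hands it to `target` until
    the queue is empty."""

    def __init__(self, target, queue, name=None):
        super().__init__(name=name)
        self._target_fn = target
        self._queue = queue

    def run(self):
        while True:
            try:
                url = self._queue.get_nowait()
            except queue_module.Empty:
                break
            self._target_fn(url)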
Code Example #6
}

data_dir = sys.argv[1]
data_size = int(sys.argv[2])
ds = sys.argv[3]
bayesian_model_name = sys.argv[4]
result_filename = "result.txt"
mc_samples = 100
batch_size = 4096
bayesians_dir = '/home/vmieuli/bayesians/'
########################################################################################

bayesian_file = '%s_%d_%s_bayesian.h5' % (ds, data_size, bayesian_model_name)
utils.write_on_file(result_filename,
                    'w',
                    "[%s] Loading bayesian model: %s ..." %
                    (ds.upper(), bayesian_file),
                    new_line=False)
bayesian_model = load_model(os.path.join(bayesians_dir, bayesian_file))
utils.write_on_file(result_filename, 'a', " Done")

all_std_uncertainties = {dataset: [] for dataset in datasets}
all_aleatoric_uncertainties = {dataset: [] for dataset in datasets}
all_epistemic_uncertainties = {dataset: [] for dataset in datasets}

# n_samples = min([len(os.listdir(os.path.join(data_dir, dataset))) for dataset in datasets])
for dataset in datasets:
    files = os.listdir(os.path.join(data_dir, dataset))
    '''random.shuffle(files)
    files = files[:n_samples]'''
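
The utils.write_on_file used throughout these LLwE snippets differs from the semac-bot helper: its call sites pass a filename, an open mode ('w' on the first task, 'a' afterwards), the message, and an optional new_line flag (see the new_line=False call above, which lets the later " Done" continue the same line). A minimal sketch consistent with those call sites, offered as an assumption:

def write_on_file(filename, open_mode, text, new_line=True):
    """Write a progress message to the result file, optionally
    terminating the line. Sketch inferred from the call sites above;
    the actual utils implementation is not shown."""
    with open(filename, open_mode) as f:
        f.write(text + ('\n' if new_line else ''))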
Code Example #7
autoencoder_epochs = 25
########################################################################################

features_mean = np.load(os.path.join(os.getcwd(),
                                     str(data_size) + '_mean.npy'))
features_std = np.load(os.path.join(os.getcwd(), str(data_size) + '_std.npy'))
features_extractor = utils_models.build_features_extractor(
    model_name=features_extractor_name, input_shape=data_shape)

for task_id, dataset in enumerate(datasets):
    if ds == dataset:
        if task_id == 0:
            open_mode = 'w'
        else:
            open_mode = 'a'
        utils.write_on_file(result_filename, open_mode,
                            "[%s] Loading data..." % dataset.upper())
        print("[%s] Loading data..." % dataset.upper())

        X_train, _, X_valid, _, _, _ = utils_data.load_data(
            data_dir=data_dir,
            dataset=dataset,
            data_size=data_size,
            valid_split=valid_split,
            test_split=test_split,
            seed=seed)
        utils.write_on_file(
            result_filename, 'a',
            "[%s] Starting autoencoder's cross-validation..." %
            dataset.upper())
        print("[%s] Starting autoencoder's cross-validation..." %
              dataset.upper())
Code Example #8
features_extractor_name = 'VGG16'
autoencoder_hidden_layer_sizes = [50, 100, 200]
autoencoder_weight_decays = [-1, 0.005, 0.0005]
autoencoder_learning_rates = [0.1, 0.01, 0.001]
autoencoder_epsilons = [1e-07, 1e-08]
autoencoder_objective_loss = 'binary_crossentropy'
autoencoder_epochs = 20
########################################################################################

features_mean = np.load(os.path.join(os.getcwd(), str(data_size) + '_mean.npy'))
features_std = np.load(os.path.join(os.getcwd(), str(data_size) + '_std.npy'))
features_extractor = utils_models.build_features_extractor(model_name=features_extractor_name, input_shape=data_shape)

for task_id, dataset in enumerate(datasets):
    if ds == dataset:
        utils.write_on_file(result_filename, 'w', "[%s] Loading data..." % dataset.upper())
        print("[%s] Loading data..." % dataset.upper())
        X_train, _, X_valid, _, _, _ = utils_data.load_data(data_dir=data_dir,
                                                            dataset=dataset,
                                                            data_size=data_size,
                                                            valid_split=valid_split,
                                                            test_split=test_split,
                                                            seed=seed)

        utils.write_on_file(result_filename, 'a', "[%s] Starting autoencoder's cross-validation..." % dataset.upper())
        print("[%s] Starting autoencoder's cross-validation..." % dataset.upper())
        autoencoder = utils.autoencoder_cross_validation(features_extractor=features_extractor,
                                                         batch_size=batch_size, X_train=X_train, X_valid=X_valid,
                                                         features_mean=features_mean, features_std=features_std,
                                                         hidden_layer_sizes=autoencoder_hidden_layer_sizes,
                                                         weight_decays=autoencoder_weight_decays,
Code Example #9
result_filename = "result.txt"
data_shape = (data_size, data_size, 3)
batch_size = 32
data_augmentation = False

weight_decays = [-1, 0.005]
learning_rates = [0.01, 0.001]
epsilons = [1e-07, 1e-08]
dropout_rates = [0.25, 0.35, 0.5]
epochs = 70
########################################################################################

bayesian_model = None
for task_id, dataset in enumerate(datasets):
    if ds == dataset:
        utils.write_on_file(result_filename, 'w',
                            "[%s] Loading data..." % dataset.upper())
        print("[%s] Loading data..." % dataset.upper())
        X_train, y_train, X_valid, y_valid, X_test, y_test = utils_data.load_data(
            data_dir=data_dir,
            dataset=dataset,
            data_size=data_size,
            valid_split=valid_split,
            test_split=test_split,
            seed=seed)

        utils.write_on_file(result_filename, 'a',
                            "[%s] Normalizing data..." % dataset.upper())
        print("[%s] Normalizing data..." % dataset.upper())
        X_train = utils.normalize_data(base_model_name=base_model_name,
                                       X=X_train)
        X_valid = utils.normalize_data(base_model_name=base_model_name,
Code Example #10
expert_name = sys.argv[7]

result_filename = "result.txt"
data_shape = (data_size, data_size, 3)
batch_size = 16
data_augmentation = False

expert_weight_decays = [-1, 0.005, 0.0005]
expert_learning_rates = [0.01, 0.001, 0.0001]
expert_epsilons = [1e-07, 1e-08]
expert_epochs = 30
########################################################################################

for task_id, dataset in enumerate(datasets):
    if ds == dataset:
        utils.write_on_file(result_filename, 'w', "[%s] Loading data..." % dataset.upper())
        print("[%s] Loading data..." % dataset.upper())

        X_train, y_train, X_valid, y_valid, X_test, y_test = utils_data.load_data(data_dir=data_dir,
                                                                                  dataset=dataset,
                                                                                  data_size=data_size,
                                                                                  valid_split=valid_split,
                                                                                  test_split=test_split,
                                                                                  seed=seed)

        utils.write_on_file(result_filename, 'a', "[%s] Normalizing data..." % dataset.upper())
        print("[%s] Normalizing data..." % dataset.upper())
        # NOTE: rebinding the loop variable would leave the original
        # arrays unchanged, so reassign each split explicitly.
        X_train, X_valid, X_test = [
            (X - X.mean(axis=(0, 1, 2), keepdims=True))
            / X.std(axis=(0, 1, 2), keepdims=True)
            for X in (X_train, X_valid, X_test)
        ]

        utils.write_on_file(result_filename, 'a', "[%s] Starting expert's cross-validation..." % dataset.upper())
Code Example #11
File: experts.py  Project: valeriomieuli/LLwE
base_model_name = sys.argv[7]

result_filename = "result.txt"
data_shape = (data_size, data_size, 3)
batch_size = 16
data_augmentation = False

expert_weight_decays = [-1, 0.005, 0.0005]
expert_learning_rates = [0.01, 0.001, 0.0001]
expert_epsilons = [1e-07, 1e-08]
expert_epochs = 3
########################################################################################

for task_id, dataset in enumerate(datasets):
    if ds == dataset:
        utils.write_on_file(result_filename, 'w',
                            "[%s] Loading data..." % dataset.upper())
        print("[%s] Loading data..." % dataset.upper())
        X_train, y_train, X_valid, y_valid, X_test, y_test = utils_data.load_data(
            data_dir=data_dir,
            dataset=dataset,
            data_size=data_size,
            valid_split=valid_split,
            test_split=test_split,
            seed=seed)

        utils.write_on_file(result_filename, 'a',
                            "[%s] Normalizing data..." % dataset.upper())
        print("[%s] Normalizing data..." % dataset.upper())
        X_train = utils.normalize_data(base_model_name=base_model_name,
                                       X=X_train)
        X_valid = utils.normalize_data(base_model_name=base_model_name,
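
The last two snippets are cut off mid-call into utils.normalize_data, whose body is not shown. Since it is keyed on base_model_name, one plausible reading is a thin wrapper over the matching Keras preprocess_input; the following is a sketch under that assumption, with the model mapping invented for illustration:

from tensorflow.keras.applications import resnet50, vgg16

# Hypothetical mapping; the real helper may normalize differently.
_PREPROCESSORS = {
    'VGG16': vgg16.preprocess_input,
    'ResNet50': resnet50.preprocess_input,
}

def normalize_data(base_model_name, X):
    """Apply the model-specific input preprocessing to a batch X."""
    return _PREPROCESSORS[base_model_name](X.astype('float32'))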