Example 1
def _england_pipeline(DEBUG=True):
    prefix = get_prefix(DEBUG)
    def rename_subfolders(path):
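        # Strip the first two characters from each subfolder name; when DEBUG is set the mv command is only printed (dry run).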
        for folder_name in os.listdir(path):
            cmd = f"mv {os.path.join(path, folder_name)} {os.path.join(path, folder_name[2:])}"
            print(cmd)
            if not DEBUG:
                os.system(cmd)
    rename_subfolders(config.ENGLAND_BIRMINGHAM_RAW)
    rename_subfolders(config.ENGLAND_COVENTRY_RAW)
    rename_subfolders(config.ENGLAND_LIVERPOOL_RAW)
    rename_subfolders(config.ENGLAND_PEAK_RAW)

    rename_many(
        [
            (1, 37, config.ENGLAND_BIRMINGHAM_RAW),
            (2,  3, config.ENGLAND_COVENTRY_RAW),
            (5, 13, config.ENGLAND_COVENTRY_RAW),
            (15,18, config.ENGLAND_COVENTRY_RAW),
            (1, 41, config.ENGLAND_LIVERPOOL_RAW),
            (1, 14, config.ENGLAND_PEAK_RAW),
        ],
        dst=config.ENGLAND_DATA,
        DEBUG=DEBUG,
        prefix=''
    )
    resize(
        src=config.ENGLAND_DATA,
        dst=config.ENGLAND_960x720,
        x=960,
        y=720,
        DEBUG=DEBUG,
        prefix=prefix
    )
    print(train_val_divide(config.ENGLAND_960x720, train_percentage=0))
Example 2
    async def prefix(self, ctx):
        if ctx.invoked_subcommand is None:
            prefix = PrefixHandler.get_prefix(ctx.guild.id)
            embed = discord.Embed(title="Setting a custom prefix", color=0x467f05)
            if not prefix:
                prefix = get_prefix()
                embed.description = f"`{prefix}set <prefix>`"
Example 3
def qc_flow(indir, seq_type, trim_tso):
    stats_prefix = []
    prefix_list = get_prefix(indir + '/preproc.list')
    sample_list = get_prefix(indir + '/sample.list')
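    # Per-prefix preprocessing stats: one integer row per entry in preproc.list.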
    for prefix in prefix_list:
        stats = qc_prefix(indir, prefix, seq_type, trim_tso)
        stats_prefix.append(list(map(int, stats)))
    stats_prefix = pd.DataFrame(stats_prefix, index=prefix_list)
    if trim_tso:
        stats_prefix.columns = [
            'raw', 'with_tso', 'trimmed', 'bc_parsed', 'splited'
        ]
    else:
        stats_prefix.columns = ['raw', 'trimmed', 'bc_parsed', 'splited']

    stats_sample = []
    for sample in sample_list:
        stats = qc_sample(indir, sample, seq_type)
        stats_sample.append(list(map(int, stats)))
    stats_sample = pd.DataFrame(stats_sample, index=sample_list)
    if seq_type == 'RNA':
        stats_sample.columns = [
            'total_reads', 'splited_reads', 'retrimmed', 'mapped',
            'usable_reads', 'total_umis', 'cell_filtered_umis',
            'mapped_to_gene'
        ]
    else:
        stats_sample.columns = [
            'total_reads', 'splited_reads', 'mapped', 'rm_chrM',
            'usable_reads', 'total_umis', 'cell_filtered_umis'
        ]
    frac_prefix = pd.DataFrame(
        [stats_prefix[col] / stats_prefix['raw'] for col in stats_prefix.columns],
        index=stats_prefix.columns).T
    frac_prefix = frac_prefix.round(3)
    frac_sample_cols = stats_sample.columns[:5]
    frac_sample = pd.DataFrame(
        [stats_sample[col] / stats_sample['total_reads'] for col in frac_sample_cols],
        index=frac_sample_cols).T
    frac_sample = frac_sample.round(3)
    return stats_prefix, stats_sample, frac_prefix, frac_sample
Example 4
def standalone(b_name, f_name, bug_meta, failed_file):
    failed = 0

    f = {}
    f["buggy_file"] = b_name
    f["fixed_file"] = f_name

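    # Parse both files into ASTs; a parse error on either side marks the pair as failed.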
    err_b, err_f = utils.get_asts(basename(b_name), basename(f_name),
                                  args.input_folder, args.output_folder,
                                  bug_meta)

    if err_b or err_f:
        failed = 1
        utils.log_failure(failed_file, b_name, f_name)
        return None, failed

    ast_diff_fname = os.path.join(args.output_folder,
                                  utils.get_prefix(b_name) + "_ast_diff.txt")

    num_ast_diff = utils.get_ast_diff(basename(b_name), basename(f_name),
                                      ast_diff_fname, args.output_folder)

    bug_meta["files_changed"][0]["ast_diff_file"] = ast_diff_fname

    metadata = {}
    metadata["file"] = utils.get_prefix(b_name) + ".js"
    metadata["num_ast_diffs"] = num_ast_diff

    bug_meta["files_changed"][0]["metadata"] = metadata

    if num_ast_diff < 0:
        utils.log_failure(failed_file, b_name, f_name)
        failed = 1
        print("json diff failed")

    return bug_meta, failed
Example 5
def _swiss_pipeline(DEBUG=True):
    print("Move and rename Swiss images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    rename_many(
        [
            (1  , 160, config.SWISS_1_RAW),
            (0  , 39 , config.SWISS_2_RAW),
            (1  , 113, config.SWISS_3_RAW),
        ],
        dst=config.SWISS_DATA,
        DEBUG=DEBUG,
        prefix=prefix
    )
    resize(config.SWISS_DATA, config.SWISS_960x720, x=960, y=720, DEBUG=DEBUG, prefix=prefix)
    # was 1280x720 for swiss
    print(train_val_divide(config.SWISS_960x720))
Example 6
def _suzhou_pipeline(DEBUG=True):
    print("Move and rename Suzhou images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    rename_many(
        [
            (1  , 100, config.SUZHOU_1_RAW),
            (101, 200, config.SUZHOU_2_RAW),
            (201, 300, config.SUZHOU_3_RAW),
            (301, 343, config.SUZHOU_4_RAW),
        ],
        dst=config.SUZHOU_DATA,
        DEBUG=DEBUG,
        prefix=prefix
    )
    resize(config.SUZHOU_DATA, config.SUZHOU_960x720, x=960, y=720, DEBUG=DEBUG, prefix=prefix)
    # was 1280x720 for swiss
    print(train_val_divide(config.SUZHOU_960x720))
Example 7
def _full_pipeline(DEBUG=True):
    print("Move and rename all images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    rename_many(
        [
            (1  , 20 , config.KUNSHAN_1_RAW),
            (1  , 49 , config.PARIS_1_RAW),
            (1  , 9  , config.SHENNONGJIA_1_RAW),
            (1  , 100, config.SUZHOU_1_RAW),
            (101, 200, config.SUZHOU_2_RAW),
            (201, 300, config.SUZHOU_3_RAW),
            (301, 343, config.SUZHOU_4_RAW),
            (1  , 160, config.SWISS_1_RAW),
            (0  , 39 , config.SWISS_2_RAW),
            (1  , 113, config.SWISS_3_RAW),
            (1  , 57 , config.WEIHAI_1_RAW),
            (1  , 69 , config.WUXI_1_RAW),
        ],
        dst=config.FULL_DATA,
        DEBUG=DEBUG,
        prefix=prefix
    )
    resize(config.FULL_DATA, config.FULL_960x720, x=960, y=720, DEBUG=DEBUG, prefix=prefix)
    print(train_val_divide(config.FULL_960x720))
Example 8
import aiohttp
from discord.ext import commands
from utils import get_prefix, convert_to_arabic_number, make_embed
from collections import OrderedDict
from dbhandler import create_connection, update_guild_translation, get_guild_translation

prefix = get_prefix()

INVALID_TRANSLATION = "**Invalid translation**. List of translations: <https://github.com/galacticwarrior9/is" \
                      "lambot/blob/master/Translations.md>"

INVALID_ARGUMENTS_ARABIC = "Invalid arguments! Do `{0}aquran [surah]:[ayah]`. Example: `{0}aquran 1:1`" \
                               "\nTo fetch multiple verses, do `{0}quran [surah]:[first ayah]-[last ayah]`" \
                               "\nExample: `{0}aquran 1:1-7`".format(prefix)

INVALID_ARGUMENTS_ENGLISH = "Invalid arguments! Do `{0}aquran [surah]:[ayah]`. Example: `{0}aquran 1:1`" \
                               "\nTo fetch multiple verses, do `{0}quran [surah]:[first ayah]-[last ayah]`" \
                               "\nExample: `{0}aquran 1:1-7`".format(prefix)

SQL_ERROR = "There was an error connecting to the database."

ICON = 'https://cdn6.aptoide.com/imgs/6/a/6/6a6336c9503e6bd4bdf98fda89381195_icon.png'


class QuranSpecifics:
    def __init__(self, ref, edition):
        self.edition = edition
        self.ordered_dict = OrderedDict()
        self.surah, self.offset, self.limit = self.process_ref(ref)
        self.quran_com = self.is_quran_com(edition)
Example 9
#!/usr/bin/python

import os, sys
import utils

GLIMMER_PATH = "../../pkg/glimmer3.02/bin/"
GLIMMER_ARGS = "-o 50 -g110 -t30"

for file in os.listdir(sys.argv[1]):
    if not file.endswith('.fna'):
        continue
    print(file)

    prefix = utils.get_prefix(file)

    cmd = "%s/glimmer3 %s %s/%s %s %s" % (
        GLIMMER_PATH, GLIMMER_ARGS, sys.argv[1], file, sys.argv[2], prefix)
    print(cmd)
    os.system(cmd)
Example 10
from typing import Optional

from discord.ext import commands

import database
import utils

prefix = utils.get_prefix()


class Search(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=["search", "search_r", "sr", "s"])
    async def search_reminders(self, ctx, date: Optional[str] = None):
        """Searches for reminders on a specific day"""
        if date:
            try:
                date = utils.split_date(date)
            except UnboundLocalError:
                await ctx.send(embed=utils.generate_embed(
                    "", "Date was not in the correct format."))
                return 1
            db_search = database.get_reminders(
                ctx.message.guild.id,
                **{
                    "year": date["year"],
                    "month": date["month"],
                    "day": date["day"]
                },
Example 11
    parser.add_argument("--kappa_smooth",
                        help="Kappa used for L0 smoothing",
                        type=float,
                        default=2.0)
    parser.add_argument("--lambda_smooth",
                        help="Lambda used for L0 smoothing",
                        type=float,
                        default=2e-2)
    parser.add_argument("--min_val",
                        help="Canny hysteresis min value",
                        type=float,
                        default=50)
    parser.add_argument("--max_val",
                        help="Canny hysteresis max value",
                        type=float,
                        default=200)
    args = parser.parse_args()

    # img_path = join(dir_path, "images", "pano", "image{:04d}.png".format(readout_idx))
    if not exists(args.path):
        exit(args.path + " could not be found")

    edges = edge_detection(args.path, args.kappa_smooth, args.lambda_smooth,
                           args.min_val, args.max_val)
    print(edges.shape, type(edges), edges[0].dtype, np.min(edges),
          np.max(edges))

    edges_path = get_prefix(args.path) + "_edges.jpg"
    cv2.imwrite(edges_path, edges)
    print("Edges saved to {}".format(edges_path))
Example 12
        locs_pix, locs_w = project_pointcloud_kitti(img, pts_array,
                                                    pts_edge_score, calib)

    if locs_pix is not None:

        # Visual verification
        fig = plt.figure()
        plt.imshow(img)
        plt.scatter(x=locs_pix[0, :],
                    y=locs_pix[1, :],
                    marker='o',
                    c='#f5784280',
                    lw=0,
                    s=locs_w * 2.0)
        fig.tight_layout()
        export_path = get_prefix(args.img_path) + "_" + basename(
            splitext(args.pcl_path)[0]) + "_" + (args.t if calib is None else
                                                 "calib.txt") + ".png"
        fig.savefig(export_path, dpi=250, bbox_inches='tight')
        print(export_path)

        # Overlay over gradient image
        fig = plt.figure()
        plt.imshow(edges, cmap='gray')  #  , vmin=0, vmax=255
        plt.scatter(x=locs_pix[0, :],
                    y=locs_pix[1, :],
                    marker='o',
                    c='#f5784280',
                    lw=0,
                    s=locs_w * 2.0)
        export_path = get_prefix(args.img_path) + "_edge" + "_" + basename(
Example 13
# merge RNA mtx
from utils import get_prefix, read_mtx, write_mtx
import sys
import pandas as pd

indir = sys.argv[1]
outdir = sys.argv[2]

prefix_list = get_prefix(indir + '/sample.list')

df_merge = pd.DataFrame()

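# Outer-join each sample's filtered RNA matrix on the feature index; missing counts become 0 below.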
for prefix in prefix_list:
    df = read_mtx(indir + '/DGE_filtered/' + prefix + '_RNA')
    df_merge = pd.merge(df_merge,
                        df,
                        left_index=True,
                        right_index=True,
                        how='outer')

df_merge = df_merge.fillna(0)
df_merge = df_merge.astype('int')

write_mtx(df_merge, outdir)
Example 14
    def __init__(self):
        gv.gshogi = self
        # set global variables for debug messages
        gv.verbose, gv.verbose_usi = utils.get_verbose()
        # prefix to find package files/folders
        self.prefix = utils.get_prefix()
        # set up .gshogi directory in the home directory
        self.gshogipath = utils.create_settings_dir()
        self.glade_dir = os.path.join(self.prefix, "glade")
        # get settings saved from previous game
        self.settings = utils.get_settings_from_file(self.gshogipath)

        self.ask_before_promoting = False
        self.gameover = False
        self.time_limit = "00:10"
        self.stopped = True
        self.quitting = False
        self.src = ""
        self.src_x = ""
        self.src_y = ""
        self.startpos = "startpos"
        self.start_stm = BLACK

        self.search_depth = 39
        self.thinking = False
        self.cmove = "none"
        self.movelist = []
        self.redolist = []
        self.player = ["Human", "gshogi"]
        self.pondermove = [None, None]

        opening_book_path = os.path.join(self.prefix, "data/opening.bbk")
        engine.init(opening_book_path, gv.verbose)

        gv.gui = gui.Gui()

        gv.pieces = pieces.Pieces()
        # custom pieceset path
        if self.settings != "":
            gv.pieces.set_custom_pieceset_path(
                self.settings.custom_pieceset_path)
        gv.pieces.load_pieces(self.prefix)

        # usiw is the instance that plays white (gote)
        # usib is the instance that plays black (sente)
        gv.usib = usi.Usi("b")
        gv.usiw = usi.Usi("w")

        # instantiate time control, engine manager and board classes
        gv.tc = time_control.Time_Control()
        gv.engine_manager = engine_manager.Engine_Manager()
        gv.board = board.Board()

        self.set_board_colours = set_board_colours.get_ref()
        # set colours to previous game (if any)
        if self.settings != "":
            self.set_board_colours.restore_colour_settings(
                self.settings.colour_settings)
        gv.gui.build_gui()
        gv.board.build_board()
        self.engine_output = engine_output.get_ref()

        # set level
        command = "level 0 " + self.time_limit
        engine.command(command)
        # turn off beeps
        if not BEEP:
            engine.command("beep")

        # restore user settings to the values from the previous game
        self.restore_settings(self.settings)

        gv.usib.set_engine(self.player[BLACK], None)
        gv.usiw.set_engine(self.player[WHITE], None)
        gv.gui.update_toolbar(self.player)

        self.move_list = move_list.get_ref()

        gv.tc.reset_clock()

        gv.gui.enable_go_button()
        gv.gui.disable_stop_button()

        self.stm = self.get_side_to_move()
        self.timer_active = False
        self.set_board_colours.apply_colour_settings()
Example 15
#!/usr/bin/python

import os, sys
import utils

GLIMMER_PATH = "../../pkg/glimmer3.02/bin/"
GLIMMER_ARGS = "-o 50 -g110 -t30"

for file in os.listdir(sys.argv[1]):
    if not file.endswith('.fna'):
        continue
    print(file)

    prefix = utils.get_prefix(file)

    cmd = "%s/glimmer3 %s %s/%s %s %s" % (GLIMMER_PATH, GLIMMER_ARGS, sys.argv[1], file, sys.argv[2], prefix)
    print(cmd)
    os.system(cmd)


Example 16
args = parser.parse_args()

# select gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
dataset = ds.load_data(args)
dataset.get_validation()

# Construct model
if args.model not in models:
    raise Exception('Unknown model:', args.model)

model = models[args.model](input_shape=dataset.get_input_shape(),
                           output_shape=dataset.get_output_shape(),
                           output=args.output)

model.main_loop(dataset,
                epochs=args.epoch,
                batchsize=args.batchsize,
                reporter=['loss'],
                validation=args.val)

predict_result = model.predict(dataset.test_features)
loss = utils.euclidean_error(dataset.test_labels, predict_result)
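# Evaluate the symbolic loss tensor with a TF1 session and reshape it to one value per test sample.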
sess = tf.Session()
loss_np = sess.run(loss)
loss_np = np.reshape(loss_np, (dataset.test_labels.shape[0], 1))
print('Test loss:\t', np.mean(loss_np), '\n')

utils.cdfplot(loss_np)
plt.savefig(model.get_filename(utils.get_prefix(args), 'losscdf', 'png'))
Example 17
    def __init__(self):
        gv.gshogi = self
        # set global variables for debug messages
        gv.verbose, gv.verbose_usi = utils.get_verbose()
        # prefix to find package files/folders
        self.prefix = utils.get_prefix()
        # set up .gshogi directory in the home directory
        self.gshogipath = utils.create_settings_dir()
        self.glade_dir = os.path.join(self.prefix, "glade")
        # get settings saved from previous game
        self.settings = utils.get_settings_from_file(self.gshogipath)

        self.ask_before_promoting = False
        self.gameover = False
        self.time_limit = "00:10"
        self.stopped = True
        self.quitting = False
        self.src = ""
        self.src_x = ""
        self.src_y = ""
        self.startpos = "startpos"
        self.start_stm = BLACK

        self.search_depth = 39
        self.thinking = False
        self.cmove = "none"
        self.movelist = []
        self.redolist = []
        self.player = ["Human", "gshogi"]
        self.pondermove = [None, None]

        opening_book_path = os.path.join(self.prefix, "data/opening.bbk")
        engine.init(opening_book_path, gv.verbose)

        gv.gui = gui.Gui()

        gv.pieces = pieces.Pieces()
        # custom pieceset path
        if self.settings != "":
            gv.pieces.set_custom_pieceset_path(
                self.settings.custom_pieceset_path)
        gv.pieces.load_pieces(self.prefix)

        # usiw is the instance that plays white (gote)
        # usib is the instance that plays black (sente)
        gv.usib = usi.Usi("b")
        gv.usiw = usi.Usi("w")

        # instantiate time control, engine manager and board classes
        gv.tc = time_control.Time_Control()
        gv.engine_manager = engine_manager.Engine_Manager()
        gv.board = board.Board()

        self.set_board_colours = set_board_colours.get_ref()
        # set colours to previous game (if any)
        if self.settings != "":
            self.set_board_colours.restore_colour_settings(
                self.settings.colour_settings)
        gv.gui.build_gui()
        gv.board.build_board()
        self.engine_output = engine_output.get_ref()

        # set level
        command = "level 0 " + self.time_limit
        engine.command(command)
        # turn off beeps
        if not BEEP:
            engine.command("beep")

        # restore user settings to the values from the previous game
        self.restore_settings(self.settings)

        gv.usib.set_engine(self.player[BLACK], None)
        gv.usiw.set_engine(self.player[WHITE], None)
        gv.gui.update_toolbar(self.player)

        self.move_list = move_list.get_ref()

        gv.tc.reset_clock()

        gv.gui.enable_go_button()
        gv.gui.disable_stop_button()

        self.stm = self.get_side_to_move()
        self.timer_active = False
        self.set_board_colours.apply_colour_settings()
Example 18
    if not os.path.isdir(args.output_folder):
        os.mkdir(args.output_folder)

    master_json = os.path.join(args.output_folder, "master_bug_metadata.json")

    processed_file = os.path.join(args.output_folder, "processed.txt")
    failed_samples_file = os.path.join(args.output_folder, "failed.txt")

    if not os.path.exists(failed_samples_file):
        open(failed_samples_file, "x").close()

    processed_pairs = utils.get_already_processed(processed_file)

    f_all = glob.glob(args.input_folder + "/*.js")

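    # An entry may be a bug-metadata dict or a bare file path; both reduce to "<prefix>.js" for the processed check.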
    def is_processed_func(p, e):
        if isinstance(e, dict):
            return utils.get_prefix(e["files_changed"][0]["metadata"]["file"]) + ".js" in p
        return utils.get_prefix(e) + ".js" in p
    pairs_to_process = utils.get_not_processed(f_all, processed_pairs,
                                               is_processed_func)

    if not pairs_to_process:
        print("Already processed entire folder")
        sys.exit(1)

    pairs = defaultdict(list)
    pool = mp.Pool(args.num_processes)

    #get pairs
    for f in pairs_to_process:
        prefix = utils.get_prefix(basename(f))
        pairs[prefix].append(f)
Example 19
import re
import discord
from utils import get_prefix, get_site_source
from discord.ext import commands
from aiohttp import ClientSession
import textwrap

HADITH_BOOK_LIST = ['bukhari', 'muslim', 'tirmidhi', 'abudawud', 'nasai',
                    'ibnmajah', 'malik', 'riyadussaliheen', 'adab', 'bulugh', 'qudsi',
                    'nawawi', 'shamail', 'ahmad']

ICON = 'https://sunnah.com/images/hadith_icon2_huge.png'

ERROR = 'The hadith could not be found on sunnah.com.'

PREFIX = get_prefix()

INVALID_INPUT = f'Invalid arguments! Please do `{PREFIX}hadith (book name)' \
                f'[(book number):(hadith number)|(raw hadith number)]` \n' \
                f'Valid book names are `{HADITH_BOOK_LIST}`'

URL_FORMAT = "https://sunnah.com/{}/{}"


class HadithGrading:
    def __init__(self):
        self.narrator = None
        self.grading = None
        self.arabicGrading = None

        self.book_number = None
Example 20
def main():
    opt = parse_opts()
    # Path configurations
    opt.annotation_path = os.path.join(opt.annotation_directory,
                                       opt.annotation_path)
    save_result_dir_name = \
        os.path.join(opt.result_path,
                     get_prefix() + '_{}{}_{}_epochs'.format(opt.model, opt.model_depth, opt.n_epochs))
    if not os.path.exists(save_result_dir_name):
        os.mkdir(save_result_dir_name)
    opt.result_path = save_result_dir_name  # save_result_dir_name already includes the original result_path

    # For data generator
    opt.scales = [opt.initial_scale]
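    # Geometric progression of crop scales: each scale is the previous one times scale_step.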
    for epoch in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)

    # Model
    model, parameters = generate_model(opt)
    # print(model)

    # Loss function
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()

    # Normalizing
    if not opt.no_mean_norm:
        opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
        opt.std = get_std(opt.norm_value, dataset=opt.std_dataset)
        norm_method = Normalize(opt.mean, opt.std)
    else:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])

    print(opt)
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)

    # **************************** TRAINING CONFIGURATIONS ************************************
    assert opt.train_crop in ['corner', 'center']
    if opt.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'center':
        crop_method = MultiScaleCornerCrop(opt.scales,
                                           opt.sample_size,
                                           crop_positions=['c'])

    # Spatial transform
    spatial_transform = Compose([
        crop_method,
        #RandomHorizontalFlip(),
        ToTensor(opt.norm_value),
        norm_method
    ])
    # Temporal transform
    temporal_transform = TemporalRandomCrop(opt.sample_duration)
    # Target transform
    target_transform = ClassLabel()

    train_loader_list = []
    if not opt.no_cross_validation:
        annotation_list = os.listdir(opt.annotation_directory)
        for annotation in annotation_list:
            opt.annotation_path = os.path.join(opt.annotation_directory,
                                               annotation)
            training_data = get_training_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
            train_loader = torch.utils.data.DataLoader(
                training_data,
                batch_size=opt.batch_size,
                shuffle=True,
                num_workers=opt.n_threads,
                pin_memory=True)
            train_loader_list.append(train_loader)
    else:
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        train_loader_list.append(train_loader)

    train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                          ['epoch', 'loss', 'acc', 'lr'])
    train_batch_logger = Logger(
        os.path.join(opt.result_path, 'train_batch.log'),
        ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

    optimizer = optim.SGD(parameters,
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          dampening=opt.dampening,
                          weight_decay=opt.weight_decay)

    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=opt.lr_patience)

    # ***************************** VALIDATION CONFIGURATIONS *********************************
    spatial_transform = Compose([
        Scale(opt.sample_size),
        CenterCrop(opt.sample_size),
        ToTensor(opt.norm_value), norm_method
    ])
    temporal_transform = LoopPadding(opt.sample_duration)
    target_transform = ClassLabel()

    val_loader_list = []
    if not opt.no_cross_validation:
        annotation_list = os.listdir(opt.annotation_directory)
        for annotation in annotation_list:
            opt.annotation_path = os.path.join(opt.annotation_directory,
                                               annotation)
            validation_data = get_validation_set(opt, spatial_transform,
                                                 temporal_transform,
                                                 target_transform)
            val_loader = torch.utils.data.DataLoader(validation_data,
                                                     batch_size=opt.batch_size,
                                                     shuffle=False,
                                                     num_workers=opt.n_threads,
                                                     pin_memory=True)
            val_loader_list.append(val_loader)
    else:
        validation_data = get_validation_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
        val_loader = torch.utils.data.DataLoader(validation_data,
                                                 batch_size=opt.batch_size,
                                                 shuffle=False,
                                                 num_workers=opt.n_threads,
                                                 pin_memory=True)
        val_loader_list.append(val_loader)

    val_logger = Logger(os.path.join(opt.result_path, 'val.log'),
                        ['epoch', 'loss', 'acc'])

    # **************************************** TRAINING ****************************************
    epoch_avg_time = AverageMeter()
    train_loss_list = []
    train_acc_list = []
    valid_acc_list = []
    best_accuracy = 0
    current_train_data = 0
    current_valid_data = 0
    opt.frequence_cross_validation = round(opt.n_epochs /
                                           opt.n_cross_validation_sets + 0.5)

    for epoch in range(opt.begin_epoch, opt.n_epochs + 1):
        epoch_start_time = time.time()
        print('Epoch #' + str(epoch))

        # optimizer = regulate_learning_rate(optimizer, epoch, opt.frequence_regulate_lr)

        train_loader = train_loader_list[current_train_data]
        if not opt.no_cross_validation and epoch % opt.frequence_cross_validation == 0:
            print('\t##### Cross-validation: switch training data #####')
            current_train_data = (current_train_data +
                                  1) % len(train_loader_list)
            train_loader = train_loader_list[current_train_data]
        train_loss, train_acc = train_epoch(epoch, train_loader, model,
                                            criterion, optimizer, opt,
                                            train_logger, train_batch_logger)

        val_loader = val_loader_list[current_valid_data]
        if not opt.no_cross_validation and epoch % opt.frequence_cross_validation == 0:
            print('\t##### Cross-validation: switch validation data #####')
            current_valid_data = (current_valid_data +
                                  1) % len(val_loader_list)
            val_loader = val_loader_list[current_valid_data]
        validation_acc = val_epoch(epoch, val_loader, model, criterion, opt,
                                   val_logger)

        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        valid_acc_list.append(validation_acc)

        # Save model with best accuracy
        if validation_acc > best_accuracy:
            best_accuracy = validation_acc
            save_file_path = os.path.join(opt.result_path, 'best_model.pth')
            states = {
                'epoch': epoch + 1,
                'arch': opt.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(states, save_file_path)

        epoch_end_time = time.time() - epoch_start_time
        epoch_avg_time.update(epoch_end_time)
        print('\tTime left: ' +
              str(round(epoch_avg_time.avg *
                        (opt.n_epochs - epoch) / 60, 1)) + ' minutes')

    # ******************************* SAVING RESULTS OF TRAINING ******************************
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs),
                  train_loss_list, 'red', 'Loss',
                  os.path.join(opt.result_path, 'train_loss.png'))
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs), train_acc_list,
                  'blue', 'Accuracy',
                  os.path.join(opt.result_path, 'train_accuracy.png'))
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs), valid_acc_list,
                  'blue', 'Accuracy',
                  os.path.join(opt.result_path, 'validation_accuracy.png'))