예제 #1
0
#Contact: Xiao Wang ([email protected], [email protected])

#Some codes adopted from https://github.com/facebookresearch/moco

from ops.argparser import  argparser
from ops.Config_Envrionment import Config_Environment
import torch.multiprocessing as mp
from training.main_worker import main_worker
def main(args):
    """Entry point: set up the environment and launch training.

    Spawns one worker process per GPU when multiprocessing-distributed
    mode is requested; otherwise runs a single worker in this process.
    """
    # Configure the environment; returns how many GPUs this node exposes.
    ngpus_per_node = Config_Environment(args)

    if args.multiprocessing_distributed != 1:
        # Single-process path: invoke the worker directly on the chosen GPU.
        main_worker(args.gpu, ngpus_per_node, args)
        return

    # world_size was supplied as a node count; scale it to the total
    # process count, since every GPU on this node gets its own process.
    args.world_size = ngpus_per_node * args.world_size
    # Launch one main_worker process per GPU via torch.multiprocessing.
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))


if __name__ == '__main__':
    # Parse command-line options; the second return value (params) is
    # not needed by this entry point.
    args, _params = argparser()
    main(args)
예제 #2
0
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

# GNU General Public License V3 for more details.

#

# You should have received a copy of the GNU v3.0 General Public License

# along with this program.  If not, see https://www.gnu.org/licenses/gpl-3.0.en.html.
import os
from ops.argparser import argparser
from ops.os_operation import mkdir
import shutil

if __name__ == "__main__":
    params = argparser()
    if params['mode'] == 0:
        input_map = params['F']
        input_map = os.path.abspath(input_map)
        type = params['type']
        choose = params['gpu']
        os.environ["CUDA_VISIBLE_DEVICES"] = choose
        if type == 0:
            indicate = 'SIMU6'
        elif type == 1:
            indicate = 'SIMU10'
        elif type == 2:
            indicate = 'SIMU_MIX'
        elif type == 3:
            indicate = 'REAL'
        else:
예제 #3
0
def main(args):
    """Configure the runtime environment and start training.

    Optionally pins the visible CUDA devices, then either spawns one
    training process per GPU (distributed mode) or calls the worker
    directly in this process.
    """
    if args.choose is not None:
        # Restrict CUDA to the user-selected device id(s).
        os.environ['CUDA_VISIBLE_DEVICES'] = args.choose
        print("Current we choose gpu:%s" % args.choose)

    # Environment setup; returns the GPU count available on this node.
    ngpus_per_node = Config_Environment(args)

    if args.multiprocessing_distributed != 1:
        # Non-distributed path: run a single worker in-process.
        main_worker(args.gpu, ngpus_per_node, args)
        return

    # world_size arrives as a node count; convert it to a total process
    # count — one process per GPU on every node.
    args.world_size = ngpus_per_node * args.world_size
    # Fork one main_worker per GPU via torch.multiprocessing.
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))


if __name__ == '__main__':
    # Build the CLI parser and hand the parsed options to main().
    cli = argparser()
    main(cli.parse_args())