def builder(self, *params):
    # map the positional trial values onto the declared parameter names
    args = dict(zip([p.name for p in self.parameters], params))
    try:
        model_pytorch = self.model_fn(**args)
        return ModelPytorch(None, source=model_pytorch)
    except Exception:
        str_param = ','.join('{0}={1!r}'.format(k, v)
                             for k, v in args.items())
        print("Failed to build model with params: {}".format(str_param))
        return None
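
The important detail in this builder is that the positional trial values are zipped against the declared parameter names before calling the user-supplied model function. A minimal, self-contained sketch of that pattern (Param, parameters and model_fn here are hypothetical stand-ins, not part of the framework):

from collections import namedtuple

# hypothetical stand-ins for self.parameters and self.model_fn; the real
# objects are supplied by the surrounding optimization framework
Param = namedtuple('Param', ['name'])
parameters = [Param('hidden_size'), Param('dropout')]

def model_fn(hidden_size, dropout):
    # placeholder for the user-supplied model constructor
    return {'hidden_size': hidden_size, 'dropout': dropout}

def builder(*params):
    # positional trial values are matched to parameter names in declaration order
    args = dict(zip([p.name for p in parameters], params))
    return model_fn(**args)

print(builder(128, 0.5))  # {'hidden_size': 128, 'dropout': 0.5}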
Example #2
    def builder(self, *params):
        args = dict(zip([p.name for p in self.parameters], params))
        try:
            model_pytorch = self.model_fn(**args)
            # save the built model to a temporary file
            username = os.environ.get('USER')
            os.system('mkdir -p /tmp/{}'.format(username))
            args_s = str(args).encode('utf-8')
            hashs = hashlib.sha224(args_s).hexdigest()

            model_path = "/tmp/{}/_{}_{}_pytorch.torch".format(
                username, os.getpid(), hashs)
            torch.save(model_pytorch, model_path)
            return ModelPytorch(None, filename=model_path, gpus=self.gpus)
        except Exception:
            str_param = ','.join('{0}={1!r}'.format(k, v)
                                 for k, v in args.items())
            print("Failed to build model with params: {}".format(str_param))
            return None
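
This variant additionally serializes the freshly built model to a per-user temporary file and hands ModelPytorch the file name rather than the object. A small sketch of just the path derivation, using os.makedirs in place of the mkdir -p shell call (temp_model_path is an illustrative helper, not part of the framework):

import hashlib
import os

def temp_model_path(args):
    # per-user scratch directory, created like the 'mkdir -p' call above
    username = os.environ.get('USER', 'unknown')
    os.makedirs('/tmp/{}'.format(username), exist_ok=True)
    # SHA-224 digest of the stringified hyperparameters keeps file names unique
    hashs = hashlib.sha224(str(args).encode('utf-8')).hexdigest()
    return "/tmp/{}/_{}_{}_pytorch.torch".format(username, os.getpid(), hashs)

print(temp_model_path({'hidden_size': 128, 'dropout': 0.5}))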
Example #3
hide_device = True
if args.torch:
    logging.debug("Using pytorch")
    if not args.optimizer.endswith("torch"):
        args.optimizer = args.optimizer + 'torch'
    import torch
    if hide_device:
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = device[-1] if 'gpu' in device else ''
        logging.debug('set to device %s',
                      os.environ['CUDA_VISIBLE_DEVICES'])
    else:
        if 'gpu' in device:
            torch.cuda.set_device(int(device[-1]))
    model_builder = ModelPytorch(comm,
                                 source=args.model,
                                 weights=model_weights,
                                 gpus=1 if 'gpu' in device else 0)
else:
    if args.tf:
        logging.debug("Using TensorFlow")
        backend = 'tensorflow'
        if not args.optimizer.endswith("tf"):
            args.optimizer = args.optimizer + 'tf'
        if hide_device:
            os.environ['CUDA_VISIBLE_DEVICES'] = device[
                -1] if 'gpu' in device else ''
            logging.debug('set to device %s',
                          os.environ['CUDA_VISIBLE_DEVICES'])
    else:
        logging.debug("Using Theano")
        backend = 'theano'
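
The hide_device branch restricts GPU visibility through CUDA_VISIBLE_DEVICES before CUDA is initialized, instead of calling torch.cuda.set_device afterwards. A minimal sketch of that environment trick, assuming device strings of the form 'gpuN' or 'cpu' (hide_all_but is a hypothetical helper):

import os

def hide_all_but(device):
    # expose only the trailing GPU index from a device string such as 'gpu0';
    # hide every GPU when running on CPU. Note that device[-1] only handles
    # single-digit indices, exactly as in the snippet above.
    os.environ['CUDA_VISIBLE_DEVICES'] = device[-1] if 'gpu' in device else ''

hide_all_but('gpu2')
print(os.environ['CUDA_VISIBLE_DEVICES'])        # '2'
hide_all_but('cpu')
print(repr(os.environ['CUDA_VISIBLE_DEVICES']))  # ''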
Example #4
        val_list = [s.strip() for s in val_list_file.readlines()]

    comm = MPI.COMM_WORLD.Dup()

    # Theano is the default backend; use tensorflow if --tf is specified.
    # In the theano case it is necessary to specify the device before importing.
    device = get_device(comm,
                        args.masters,
                        gpu_limit=args.max_gpus,
                        gpu_for_master=args.master_gpu)
    hide_device = True
    if args.torch:
        print("Using pytorch")
        import torch
        model_builder = ModelPytorch(comm,
                                     filename=args.model_json,
                                     weights=args.model_weights,
                                     gpus=1)
    else:
        if args.tf:
            backend = 'tensorflow'
            if hide_device:
                os.environ['CUDA_VISIBLE_DEVICES'] = device[
                    -1] if 'gpu' in device else ''
                print('set to device', os.environ['CUDA_VISIBLE_DEVICES'])
        else:
            backend = 'theano'
            os.environ[
                'THEANO_FLAGS'] = "profile=%s,device=%s,floatX=float32" % (
                    args.profile, device.replace('gpu', 'cuda'))
        os.environ['KERAS_BACKEND'] = backend
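
As the comment above notes, the backend choice has to be fixed via environment variables before the corresponding framework is imported; for Theano the MPI-assigned 'gpuN' string is also mapped onto Theano's 'cudaN' device name. A condensed sketch of that setup (configure_backend is an illustrative helper, not part of the framework):

import os

def configure_backend(use_tf, device, profile=False):
    # these variables only take effect if set before Keras/Theano is imported
    if use_tf:
        backend = 'tensorflow'
        os.environ['CUDA_VISIBLE_DEVICES'] = device[-1] if 'gpu' in device else ''
    else:
        backend = 'theano'
        # Theano names devices 'cudaN' where the scheduler hands us 'gpuN'
        os.environ['THEANO_FLAGS'] = "profile=%s,device=%s,floatX=float32" % (
            profile, device.replace('gpu', 'cuda'))
    os.environ['KERAS_BACKEND'] = backend
    return backend

print(configure_backend(False, 'gpu0'))  # 'theano'; THEANO_FLAGS uses device=cuda0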
Example #5
def builder(self, *params):
    args = dict(zip([p.name for p in self.parameters], params))
    model_pytorch = self.model_fn(**args)
    return ModelPytorch(None, filename=model_pytorch, gpus=self.gpus)
Example #6
device = get_device(comm,
                    args.masters,
                    gpu_limit=args.max_gpus,
                    gpu_for_master=args.master_gpu)
hide_device = True
if args.torch:
    print("Using pytorch")
    import torch
    if hide_device:
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = device[-1] if 'gpu' in device else ''
        print('set to device', os.environ['CUDA_VISIBLE_DEVICES'])
    else:
        if 'gpu' in device:
            torch.cuda.set_device(int(device[-1]))
    model_builder = ModelPytorch(comm,
                                 filename=args.model_json,
                                 weights=model_weights,
                                 gpus=1 if 'gpu' in device else 0)
else:
    if args.tf:
        backend = 'tensorflow'
        if not args.optimizer.endswith("tf"):
            args.optimizer = args.optimizer + 'tf'
        if hide_device:
            os.environ['CUDA_VISIBLE_DEVICES'] = device[
                -1] if 'gpu' in device else ''
            print('set to device', os.environ['CUDA_VISIBLE_DEVICES'])
    else:
        backend = 'theano'
        os.environ[
            'THEANO_FLAGS'] = "profile=%s,device=%s,floatX=float32" % (
                args.profile, device.replace('gpu', 'cuda'))