Example #1
    def run(self, resource, *args, **kwargs):
        """Execute *args* as a shell command on the remote host over SSH.

        Builds a single remote command line: optional ``sudo`` prefix,
        an optional ``cd`` into ``kwargs['cwd']``, and ``KEY=VALUE``
        assignments taken from ``kwargs['env']``; the pieces are joined
        with ``&&``, wrapped in double quotes and appended to the ssh
        command produced by ``self._ssh_cmd``.

        :param resource: resource whose transport settings to use
        :param args: command and its arguments to run remotely
        :param kwargs: ``use_sudo`` (bool), ``cwd`` (str), ``env`` (dict)
        :returns: ``SolarTransportResult`` built from the
            ``(rc, stdout, stderr)`` tuple returned by ``execute``
        """
        log.debug("RAW SSH: %s", args)

        # TODO: check for better sanitization -- only double quotes are
        # escaped here, which is not shell-injection safe.
        args = [arg.replace('"', '\\"') for arg in args]

        commands = []
        prefix = []
        if kwargs.get('use_sudo', False):
            prefix.append('sudo')

        if kwargs.get('cwd'):
            cmd = prefix + ['cd', kwargs['cwd']]
            commands.append(' '.join(cmd))

        # KEY=VALUE assignments placed directly before the remote command.
        env_assignments = []
        if 'env' in kwargs:
            for key, value in kwargs['env'].items():
                env_assignments.append('{}={}'.format(key, value))

        cmd = prefix + env_assignments + args
        commands.append(' '.join(cmd))

        remote_cmd = '"%s"' % ' && '.join(commands)

        settings = self.settings(resource)
        # Hand the password to sshpass through the process environment so
        # it never appears on the command line; use a copy so the parent
        # environment is not mutated.
        if settings.get('password'):
            env = os.environ.copy()
            env['SSHPASS'] = settings['password']
        else:
            env = os.environ
        ssh_cmd = self._ssh_cmd(settings)
        ssh_cmd += (self._ssh_command_host(settings), remote_cmd)

        log.debug("RAW SSH CMD: %r", ssh_cmd)
        # TODO convert it to SolarRunResult

        res = execute(' '.join(ssh_cmd), shell=True, env=env)
        log.debug("Remote SSH result: %r", res)
        return SolarTransportResult.from_tuple(*res)
Example #2
 def run(self, resource, *args, **kwargs):
     """Run *args* as a one-off Kubernetes batch Job and wait for it.

     Creates a ``batch/v1`` Job whose single container runs ``args``
     with the sync transport's ConfigMap mounted at ``/tmp``, polls the
     Job once per second until it either succeeds or its pod fails or
     restarts, collects the pod logs and cleans the Job and ConfigMap up.

     :param resource: resource providing volume items and the paired
         sync transport (``resource._bat_transport_sync``)
     :param args: container command to run
     :returns: ``SolarTransportResult`` built from ``(rc, stdout,
         stderr)`` -- pod logs go to stdout on success, stderr on failure
     """
     # TODO: clean on exceptions too
     api = HTTPClient(KubeConfig.from_file('~/.kube/config'))
     # handler = resource.db_obj.handler
     command = args
     items = self.get_volume_items(resource)
     sync_transport = resource._bat_transport_sync
     name = sync_transport.data_sufix
     job_name = 'job' + name
     # Kubernetes batch/v1 Job manifest: one container, the transport's
     # ConfigMap mounted at /tmp, restart on failure.
     obj = {
         'apiVersion': 'batch/v1',
         'kind': 'Job',
         'metadata': {
             'name': job_name
         },
         'spec': {
             'template': {
                 'metadata': {
                     'name': 'cnts' + name
                 },
                 'spec': {
                     'containers': [{
                         'name': 'cnt' + name,
                         'image': 'solarproject/ansible:latest',
                         'command': command,
                         'volumeMounts': [{
                             'name': 'config-volume',
                             'mountPath': '/tmp'
                         }]
                     }],
                     'volumes': [{
                         'name': 'config-volume',
                         'configMap': {
                             'name': sync_transport.configmap_name,
                             'items': items
                         }
                     }],
                     'restartPolicy': 'OnFailure'
                 }
             }
         }
     }
     self.job_obj = job_obj = Job(api, obj)
     job_obj.create()
     log.debug("Created JOB: %s", job_obj.name)
     job_succeeded = False
     rc = 0
     while True:
         log.debug("Starting K8S job loop check")
         time.sleep(1)
         job_obj.reload()
         job_status = job_obj.obj['status']
         if job_status.get('active', 0) >= 1:
             log.debug("Job is active")
             # for now assuming that we have only one POD for JOB
             pods = list(
                 pykube.Pod.objects(api).filter(
                     selector='job-name={}'.format(job_name)))
             if pods:
                 pod = pods[0]
                 log.debug("Found pods for job")
                 rc, status = self._pod_status(pod)
                 if rc > 1:
                     log.debug("Container was restarted")
                     break
                 if status == 'Error':
                     log.debug("State is Error")
                     break
         if job_status.get('succeeded', 0) >= 1:
             log.debug("Job succeeded")
             pods = list(
                 pykube.Pod.objects(api).filter(
                     selector='job-name={}'.format(job_name)))
             if not pods:
                 # Pod listing can lag behind the Job status; retry on
                 # the next poll instead of raising IndexError (which
                 # would also leave `pod` unbound below).
                 continue
             pod = pods[0]
             job_succeeded = True
             break
     txt_logs = pod.get_logs()
     log.debug("Output from POD: %s", txt_logs)
     if job_succeeded:
         stdout = txt_logs
         stderr = ''
     else:
         stdout = ''
         stderr = txt_logs
     self._clean_job(sync_transport.configmap_obj, self.job_obj)
     return SolarTransportResult.from_tuple(rc, stdout, stderr)
Example #3
 def run(self, resource, *args, **kwargs):
     """Run *args* as a one-off Kubernetes batch Job and wait for it.

     Creates a ``batch/v1`` Job whose single container runs ``args``
     with the sync transport's ConfigMap mounted at ``/tmp``, polls the
     Job once per second until it either succeeds or its pod fails or
     restarts, collects the pod logs, cleans up the Job and ConfigMap,
     and returns a ``SolarTransportResult`` built from ``(rc, stdout,
     stderr)`` -- pod logs go to stdout on success, stderr on failure.
     """
     # TODO: clean on exceptions too
     api = HTTPClient(KubeConfig.from_file('~/.kube/config'))
     # handler = resource.db_obj.handler
     command = args
     items = self.get_volume_items(resource)
     sync_transport = resource._bat_transport_sync
     # NOTE(review): attribute is spelled `data_sufix` (sic) -- defined
     # elsewhere in the project, do not "fix" the name here.
     name = sync_transport.data_sufix
     job_name = 'job' + name
     # Kubernetes batch/v1 Job manifest: one container, the transport's
     # ConfigMap mounted at /tmp, restart on failure.
     obj = {
         'apiVersion': 'batch/v1',
         'kind': 'Job',
         'metadata': {'name': job_name},
         'spec': {'template':
                  {'metadata': {
                      'name': 'cnts' + name
                      },
                   'spec': {
                       'containers': [
                           {'name': 'cnt' + name,
                            'image': 'solarproject/ansible:latest',
                            'command': command,
                            'volumeMounts': [
                                {'name': 'config-volume',
                                 'mountPath': '/tmp'}
                            ]}
                       ],
                       'volumes': [
                           {'name': 'config-volume',
                            'configMap': {
                             'name': sync_transport.configmap_name,
                             'items': items
                            }}
                       ],
                       'restartPolicy': 'OnFailure'
                   }}}}
     self.job_obj = job_obj = Job(api, obj)
     job_obj.create()
     log.debug("Created JOB: %s", job_obj.name)
     # `job_status` starts as a flag, is rebound to the Job's status
     # dict inside the loop, then rebound to True on success -- the
     # final truthiness decides stdout vs stderr below.
     job_status = False
     rc = 0
     # NOTE(review): unbounded busy-wait -- no timeout if the job never
     # becomes active or succeeds; confirm this is acceptable upstream.
     while True:
         log.debug("Starting K8S job loop check")
         time.sleep(1)
         job_obj.reload()
         job_status = job_obj.obj['status']
         if job_status.get('active', 0) >= 1:
             log.debug("Job is active")
             # for now assuming that we have only one POD for JOB
             pods = list(pykube.Pod.objects(api).filter(selector='job-name={}'.format(job_name)))
             if pods:
                 pod = pods[0]
                 log.debug("Found pods for job")
                 # rc > 1 restart count is treated as a failed container.
                 rc, status = self._pod_status(pod)
                 if rc > 1:
                     log.debug("Container was restarted")
                     break
                 if status == 'Error':
                     log.debug("State is Error")
                     break
         if job_status.get('succeeded', 0) >= 1:
             log.debug("Job succeeded")
             job_status = True
             pods = list(pykube.Pod.objects(api).filter(selector='job-name={}'.format(job_name)))
             # NOTE(review): if the pod listing lags the job status this
             # raises IndexError and leaves `pod` unbound below -- verify.
             pod = pods[0]
             break
     txt_logs = pod.get_logs()
     log.debug("Output from POD: %s", txt_logs)
     if job_status:
         stdout = txt_logs
         stderr = ''
     else:
         stdout = ''
         stderr = txt_logs
     self._clean_job(sync_transport.configmap_obj, self.job_obj)
     return SolarTransportResult.from_tuple(rc, stdout, stderr)