Esempio n. 1
0
class StarClusterTest(unittest.TestCase):
    """Base test case providing helpers to build StarCluster configs
    from the test template in temporary files."""

    # Lazily-built cached default config (see the ``config`` property).
    __cfg = None

    @property
    def config(self):
        """Return a cached, valid default test config.

        Built on first access by rendering ``config_test_template`` with
        ``default_config`` into a temporary file and loading it.
        """
        if not self.__cfg:
            tmp_file = tempfile.NamedTemporaryFile()
            tmp_file.write(config_test_template % default_config)
            tmp_file.flush()
            self.__cfg = StarClusterConfig(tmp_file.name, cache=True)
            self.__cfg.load()
        return self.__cfg

    def get_config(self, contents, cache=False):
        """Write ``contents`` to a temp file and return the loaded config.

        Bug fix: ``cache`` is now forwarded to StarClusterConfig; the
        original hard-coded ``cache=True`` and silently ignored the
        argument.
        """
        tmp_file = tempfile.NamedTemporaryFile()
        tmp_file.write(contents)
        tmp_file.flush()
        cfg = StarClusterConfig(tmp_file.name, cache=cache)
        cfg.load()
        return cfg

    def get_custom_config(self, **kwargs):
        """Return the default test config with ``kwargs`` overrides applied."""
        kwords = {}
        kwords.update(default_config)
        kwords.update(kwargs)
        return self.get_config(config_test_template % kwords)
Esempio n. 2
0
 def test_config_dne(self):
     """Loading a config path that does not exist must raise ConfigNotFound."""
     tmp_file = tempfile.NamedTemporaryFile()
     non_existent_file = tmp_file.name
     # Closing a NamedTemporaryFile deletes it, leaving a path that is
     # guaranteed not to exist.
     tmp_file.close()
     assert not os.path.exists(non_existent_file)
     try:
         cfg = StarClusterConfig(non_existent_file, cache=True)
         cfg.load()
     except exception.ConfigNotFound:
         pass
     else:
         # Bug fix: the original silently passed when no exception was
         # raised, so the test could never fail on a regression.
         raise AssertionError("expected ConfigNotFound for a missing file")
Esempio n. 3
0
 def get_custom_config(self, **kwargs):
     """ Returns default test config modified by kwargs """
     # Overlay caller overrides on top of the defaults.
     merged = dict(default_config)
     merged.update(kwargs)
     scratch = tempfile.NamedTemporaryFile()
     scratch.write(config_test_template % merged)
     scratch.flush()
     cfg = StarClusterConfig(scratch.name, cache=True)
     cfg.load()
     return cfg
Esempio n. 4
0
 def test_invalid_config(self):
     """
     Test that reading a non-INI formatted file raises an exception
     """
     tmp_file = tempfile.NamedTemporaryFile()
     tmp_file.write("""<html>random garbage file with no section headings</html>""")
     tmp_file.flush()
     try:
         cfg = StarClusterConfig(tmp_file.name, cache=True)
         cfg.load()
     except exception.ConfigHasNoSections:
         pass
     else:
         # Bug fix: the original silently passed when no exception was
         # raised, so the test could never fail on a regression.
         raise AssertionError("expected ConfigHasNoSections for non-INI input")
Esempio n. 5
0
 def test_empty_config(self):
     """
     Test that reading an empty config generates no errors and that aws
     credentials can be read from the environment.
     """
     key = 'testkey'
     secret = 'testsecret'
     # Credentials come from the environment, not the (empty) file.
     os.environ['AWS_ACCESS_KEY_ID'] = key
     os.environ['AWS_SECRET_ACCESS_KEY'] = secret
     empty_file = tempfile.NamedTemporaryFile()
     cfg = StarClusterConfig(empty_file.name, cache=True)
     cfg.load()
     assert cfg.aws['aws_access_key_id'] == key
     assert cfg.aws['aws_secret_access_key'] == secret
Esempio n. 6
0
def get_config(cluster_tag=None, template_name=None):
    """
    Load stored cluster configurations for <cluster_tag> (or, failing
    that, <template_name>); per the original docs the most recent
    configuration comes first (ordering is whatever dynamo returns).

    Returns a (possibly empty) list of dynamo items.  Consistency fix:
    the original returned ``False`` when AWS settings were missing and
    ``[]`` elsewhere; both are falsy, so returning ``[]`` everywhere is
    backward-compatible and gives callers one type to handle.
    """
    cfg = StarClusterConfig().load()
    if not cluster_tag and not template_name:
        log.warning("Attempt to clone without a template_name or cluster_tag")
        return []
    s3_bucket = cfg.aws['aws_meta_bucket']
    table_name = cfg.aws['aws_config_table']
    if table_name is None:
        log.warning(("AWS_CONFIG_TABLE is not defined."
                     " This cluster will not be cloneable."))
        return []
    if s3_bucket is None:
        log.warning(("AWS_META_BUCKET is not defined."
                     "This cluster will not be cloneable"))
        return []
    conn = boto.dynamodb2.connect_to_region('us-east-1')
    cluster_info_table = Table(table_name, connection=conn)
    # Query by tag when given; otherwise scan for the template name.
    if cluster_tag:
        clusters = cluster_info_table.query(cluster_tag__eq=cluster_tag)
    else:
        clusters = cluster_info_table.scan(template_name__eq=template_name)
    if clusters:
        return list(clusters)
    return []
Esempio n. 7
0
 def config(self):
     """ Returns (valid) default test config """
     # Serve the cached instance when it has already been built.
     if self.__cfg:
         return self.__cfg
     cfg_file = tempfile.NamedTemporaryFile()
     cfg_file.write(config.config_test_template % config.default_config)
     cfg_file.flush()
     self.__cfg = StarClusterConfig(cfg_file.name, cache=True).load()
     return self.__cfg
Esempio n. 8
0
 def config(self):
     """ Returns (valid) default test config """
     # Build the default config once; subsequent calls reuse it.
     if not self.__cfg:
         rendered = config_test_template % default_config
         scratch = tempfile.NamedTemporaryFile()
         scratch.write(rendered)
         scratch.flush()
         # Assign before loading, mirroring the original ordering.
         self.__cfg = StarClusterConfig(scratch.name, cache=True)
         self.__cfg.load()
     return self.__cfg
Esempio n. 9
0
def start_cluster(request):
    """Start the "geovizcluster" StarCluster sized from the POSTed GPU count.

    Renders the config template with cluster size sqrt(gpunum), starts
    the cluster, waits until it reports up, launches an X display on
    every node, and returns the metadata dict consumed by the UI.
    """
    gpunum = request.POST["gpunum"]
    # Cluster is laid out as a square grid: size = sqrt(#GPUs).
    cluster_size = int(math.sqrt(int(gpunum)))
    # Variable name lists echoed back to the client as JSON strings.
    varlist = json.dumps(["acp", "hgt", "alp"])
    var3dlist = json.dumps(["delt", "dl", "sc", "dd", "dw", "dflx"])
    var3dlistflow = json.dumps(["uh,vh", "zhyb", "omg", "rr", "dc"])

    root_path = STORAGE_ROOTPATH  # use this for server
    #    root_path = "C:/QLiu/RemoteVizNow/NOW_geoviz/" # use this for localhost
    conf_template_path = root_path + "config_template"
    conf_new_path = root_path + "config"

    # Render the StarCluster config by substituting the size placeholder.
    with open(conf_template_path, 'r') as f:
        conf_template = f.read()

    new_conf = conf_template.replace("[MY_CLUSTER_SIZE]", str(cluster_size))

    with open(conf_new_path, 'wb') as f:
        f.write(new_conf)

    cfg = StarClusterConfig(conf_new_path)
    cfg.load()
    cluster = cfg.get_cluster_template("geovizcluster")
    try:
        cluster.start()
    except Exception as e:
        # NOTE(review): failures are only printed; control still falls
        # through to the wait loop below, which may never terminate if
        # the cluster did not actually start -- confirm intended.
        print e

    # Poll until StarCluster reports the cluster as up.
    while not cluster.is_cluster_up():
        sleep(10)

    # Start an X display (:1) on every node so GPU rendering can run.
    for node in cluster.nodes:
        cmd = "sudo /usr/bin/X :1 && screen"
        node.ssh.execute(cmd, source_profile=False, detach=True)

    return {
        "varlist": varlist,
        "var3dlist": var3dlist,
        "var3dlistflow": var3dlistflow,
        "startcluster": "yes",
        "region": "1",
        "instancetype": "1",
        "gpunum": gpunum,
    }
Esempio n. 10
0
def terminate_cluster(request):
    """Force-terminate the "geovizcluster" cluster and return the UI
    metadata dict.

    The terminate call is attempted twice because the first forced
    termination can fail transiently (behavior kept from the original).
    """
    gpunum = request.POST["gpunum"]
    # Variable name lists echoed back to the client as JSON strings.
    varlist = json.dumps(["acp", "hgt", "alp"])
    var3dlist = json.dumps(["delt", "dl", "sc", "dd", "dw", "dflx"])
    var3dlistflow = json.dumps(["uh,vh", "zhyb", "omg", "rr", "dc"])

    root_path = STORAGE_ROOTPATH  # use this for server
    conf_new_path = root_path + "config"

    cfg = StarClusterConfig(conf_new_path)
    cfg.load()
    cluster = cfg.get_cluster_template("geovizcluster")
    try:
        cluster.terminate_cluster(force=True)
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt
        # and SystemExit still propagate; the single retry is preserved.
        cluster.terminate_cluster(force=True)

    return {
        "varlist": varlist,
        "var3dlist": var3dlist,
        "var3dlistflow": var3dlistflow,
        "terminatecluster": "yes",
    }
Esempio n. 11
0
def checkPrices(ctype=None, cluster_tag=None, max_times=10):
    """
    Checks the spot prices
    no args compares currently running clusters to possible spot prices
    ctype checks for a certain type of image i.e. 'm1.xlarge' and returns 
    prices in order of cheapest to most expensive
    cluster_tag checks only for one running cluster
    """
    assert ctype is None or cluster_tag is None, ("you cannot specify a "
                                                  "cluster and an image type.")
    if ctype == None:
        cfg = StarClusterConfig().load()
        cm = ClusterManager(cfg)
        for cluster in cm.get_clusters():
            if cluster_tag is not None and cluster.cluster_tag != cluster_tag:
                continue
            region, template = readLog(tag=cluster.cluster_tag)
            t = "%s - of type(%s) in zone(%s) with bid(%s)" % (
                cluster.cluster_tag, cluster.master_instance_type,
                cluster._nodes[0].placement, cluster.spot_bid)
            print "=" * len(t)
            print t
            sp = getSpotPrices(cluster.master_instance_type,
                               max_times=max_times)
            print sp[cluster._nodes[0].placement]['print']
            print "=" * len(t)
            print
            print
            print "Better/equal prices @"
            curr = sp[cluster._nodes[0].placement]['current']
            for k, v in sp.iteritems():
                if curr >= v['current'] and k != cluster._nodes[0].placement:
                    print v['print']
    else:
        sp = getSpotPrices(ctype, max_times=max_times)
        a = [(p['current'], k) for k, p in sp.iteritems()]
        a.sort()
        print "type(%s)  from cheapest to most expensive" % ctype
        for _, k in a:
            print sp[k]['print']
Esempio n. 12
0
def store_config(cluster_template,
                 cluster_tag,
                 cluster_region,
                 template_name=None,
                 zone=None):
    """
    Store the json-encoded cluster_template used to start up
    <cluster_tag> in <cluster_region> in s3.
    <zone> is the zone name if the cluster is tied to a zone.

    A pointer to the template is stored in dynamo.

    Returns True on success; False (after a warning) when the required
    AWS settings are missing.
    """
    cfg = StarClusterConfig().load()
    s3_bucket = cfg.aws['aws_meta_bucket']
    table_name = cfg.aws['aws_config_table']
    if table_name is None:
        log.warning(("AWS_CONFIG_TABLE is not defined."
                     " This cluster will not be cloneable."))
        return False
    if s3_bucket is None:
        log.warning(("AWS_META_BUCKET is not defined."
                     "This cluster will not be cloneable"))
        return False
    conn = boto.dynamodb2.connect_to_region('us-east-1')
    cluster_info_table = Table(table_name, connection=conn)
    if cluster_info_table is None:
        return log.warning("%s table not found in us-east-1" % table_name)
    # Upload the full config to S3; dynamo only stores a pointer to it.
    config_path = config_to_s3(json.dumps(cluster_template), s3_bucket)
    # Data to be stored in dynamo.
    table_data = {}
    table_data['cluster_tag'] = cluster_tag
    table_data['template_name'] = template_name if template_name else 'NA'
    table_data['timestamp'] = time.strftime("%Y-%m-%d %H:%M:%S +0000",
                                            time.gmtime())
    table_data['config_path'] = config_path
    table_data['region'] = cluster_region
    table_data['zone'] = zone.name if zone else 'NA'
    cluster_info_table.put_item(data=table_data)
    return True
Esempio n. 13
0
def writeconfig_ray(request):
    """Render a distributed ray-casting image on the cluster.

    Pushes the POSTed world-wind config file to every cluster node,
    launches the CUDA ray-casting renderer on each node, polls for and
    downloads the per-tile PNGs over SSH, merges them into one image,
    and returns the merged filename as JSON.
    """
    if request.method == 'POST':
        config_txt = request.POST["configtxt"]
        root_path = STORAGE_ROOTPATH
        wwconfig_path = root_path + "wwconfig.txt"
        with open(wwconfig_path, 'wb') as f:
            f.write(config_txt)

        # Load the previously generated StarCluster config and grab the
        # running cluster's template.
        conf_new_path = root_path + "config"
        cfg = StarClusterConfig(conf_new_path)
        cfg.load()
        cluster = cfg.get_cluster_template("geovizcluster")

        tasknum = int(request.POST["tasknum"])
        # NOTE(review): gpunum is a float (math.sqrt); it is used below
        # in integer arithmetic and %d formatting -- presumably tasknum
        # is always a perfect square, confirm against callers.
        gpunum = math.sqrt(tasknum)
        datafile = "data/" + request.POST["datafile"]
        varname = "dd"
        if request.POST["varname3d"] and request.POST["varname3d"] != "":
            varname = request.POST["varname3d"]
        intrange = 80
        curtime = 8
        if request.POST["curtime"] and request.POST["curtime"] != "":
            curtime = int(request.POST["curtime"])

        # Tile dimensions: the w x h canvas is split into a gpunum x
        # gpunum grid.
        w, h = int(request.POST["wwwidth"]), int(request.POST["wwheight"])
        wdiv, hdiv = w / gpunum, h / gpunum

        #node_alias_list = []
        img_list = []

        for index, node in enumerate(cluster.nodes):
            #node_alias_list.append(node.alias)
            # send wwconfig file
            #local_config_path = "/home/bitnami/apps/django/django_projects/geoviz/geoviz/geovizapp/static/data/wwconfig.txt"
            local_config_path = wwconfig_path
            remote_config_path = "/home/ubuntu/CollabViz/wwconfig.txt"
            node.ssh.switch_user("ubuntu")
            node.ssh.put(local_config_path, remote_config_path)

            imgoutpath = "image/sub/"
            remote_root_path = "/home/ubuntu/CollabViz/"
            remote_img_path = remote_root_path + imgoutpath

            # Precompute the tile filenames this node will produce so we
            # know what to fetch later.
            tmp_imgs = []
            eachround = int(math.ceil(tasknum * 1.0 / gpunum))
            for i in range(index * eachround, (index + 1) * eachround):
                if i < tasknum:
                    curgpu = i
                    wrange, hrange = int(i / gpunum), int(i % gpunum)
                    w1, h1 = int(wrange * wdiv), int(hrange * hdiv)
                    w2, h2 = int((wrange + 1) * wdiv), int((hrange + 1) * hdiv)
                    tmp_imgs.append("%d_%d_%d_%d.png" % (w1, h1, w2, h2))
            img_list.append(tmp_imgs)

            # Launch the renderer detached so all nodes work in parallel.
            cmd = "sudo /home/ubuntu/anaconda/bin/python %s%s %d %d %d %s %s %d %d" % (
                remote_root_path, "NetCDFCUDARayCasting.py", tasknum, gpunum,
                index, datafile, varname, intrange, curtime)
            node.ssh.execute(cmd, source_profile=False, detach=True)

        # Busy-wait for each node's tiles, download them, then delete the
        # remote copies.
        # NOTE(review): remote_img_path here is the value leaked from the
        # last iteration of the loop above; it is the same for every
        # node, but the dependency is accidental.
        local_img_path = root_path + "img/sub/"
        for index, node in enumerate(cluster.nodes):
            for img in img_list[index]:
                img_get = False
                img_get_path = remote_img_path + img
                while not img_get:
                    if node.ssh.isfile(img_get_path):
                        node.ssh.get(img_get_path, local_img_path + img)
                        img_get = True
                        cmd = "sudo rm %s" % img_get_path
                        node.ssh.execute(cmd,
                                         source_profile=False,
                                         detach=True)

        # Stitch the downloaded tiles into the final image.
        # NOTE(review): '%Y%m%d%H%H%M%S' repeats %H -- probably meant
        # '%Y%m%d%H%M%S'; only affects the generated filename.
        ncvrimage = NCVRImage(filepath=local_img_path)
        final_ray_img_file = "ray_%s.png" % datetime.datetime.now().strftime(
            '%Y%m%d%H%H%M%S')
        final_ray_img_file_path = root_path + "img/" + final_ray_img_file
        ncvrimage.MergeImageFromFolder(local_img_path, final_ray_img_file_path,
                                       w, h)

        response_data = {"newimgfile": final_ray_img_file}
        return HttpResponse(json.dumps(response_data),
                            content_type="application/json")
Esempio n. 14
0
        return True

    def _validate_keypair(self):
        """Validate the cluster's SSH keypair settings.

        Checks that ``key_location`` exists and is a file, that the AWS
        account owns a keypair named ``keyname``, and (when a zone is
        configured) that the keypair lives in the zone's region.

        Returns:
            True when every check passes.

        Raises:
            exception.ClusterValidationError: on any failed check.
        """
        key_location = self.key_location
        if not os.path.exists(key_location):
            raise exception.ClusterValidationError(
                "key_location=%s does not exist." % key_location)
        elif not os.path.isfile(key_location):
            raise exception.ClusterValidationError(
                "key_location=%s is not a file." % key_location)
        keyname = self.keyname
        # Cleanup: removed the unused local ``conn = self.ec2``.
        keypair = self.ec2.get_keypair_or_none(keyname)
        if not keypair:
            raise exception.ClusterValidationError(
                "Account does not contain a key with keyname = %s. " % keyname)
        if self.zone:
            z = self.ec2.get_zone(self.zone)
            if keypair.region != z.region:
                raise exception.ClusterValidationError(
                    "Keypair %s not in availability zone region %s" %
                    (keyname, z.region))
        return True


if __name__ == "__main__":
    from starcluster.config import StarClusterConfig

    # Load the user's config, build the template, and start it only if
    # it validates.
    user_cfg = StarClusterConfig()
    user_cfg.load()
    cluster_template = user_cfg.get_cluster_template("smallcluster",
                                                     "mynewcluster")
    if cluster_template.is_valid():
        cluster_template.start(create=True)
Esempio n. 15
0
#!/usr/bin/env python
# Interactive helper: pick one of the account's S3 buckets and mount it
# locally via s3fs, using credentials from the StarCluster config.
# NOTE(review): Python 2 syntax (print statements, ``except E, e``); the
# final ``try`` block has no handler, so this snippet appears truncated.
import os
import sys

from starcluster.config import StarClusterConfig

print 'Simple wrapper script for s3fs (http://s3fs.googlecode.com/)'

# Load the user's StarCluster config to obtain AWS credentials.
cfg = StarClusterConfig(); cfg.load()
ec2 = cfg.get_easy_ec2()
buckets = ec2.s3.get_buckets()
# Present a numbered menu of the account's S3 buckets.
counter = 0
for bucket in buckets:
    print "[%d] %s" % (counter,bucket.name)
    counter += 1

# Prompt for a bucket index and a mount point; Ctrl-C exits cleanly.
try:
    inp = int(raw_input('>>> Enter the bucket to mnt: '))
    selection = buckets[inp].name
    print 'you selected: %s' % selection
    mountpt = raw_input('>>> please enter the mnt point: ')
    print 'mounting %s at: %s' % (selection,mountpt)
except KeyboardInterrupt,e:
    print
    print 'Exiting...'
    sys.exit(1)

# Shell out to s3fs with credentials from the config.
# NOTE(review): the credentials appear on the command line and are
# visible in the process list; s3fs supports a password file instead.
try:
    os.system('s3fs %s -o accessKeyId=%s -o secretAccessKey=%s %s' % (selection,
                                                                      cfg.aws.get('aws_access_key_id'),
                                                                      cfg.aws.get('aws_secret_access_key'),mountpt))
Esempio n. 16
0
#!/usr/bin/env python
# Interactive s3fs mounting helper: select an S3 bucket and mount it at
# a user-supplied path with credentials from the StarCluster config.
# NOTE(review): Python 2 syntax; the final ``try`` block lacks a
# handler, so this snippet looks truncated by the extractor.
import os
import sys

from starcluster.config import StarClusterConfig

print 'Simple wrapper script for s3fs (http://s3fs.googlecode.com/)'

# load() returns the config object, allowing the chained call.
cfg = StarClusterConfig().load()
ec2 = cfg.get_easy_ec2()
buckets = ec2.s3.get_buckets()
# List all buckets as a numbered menu.
counter = 0
for bucket in buckets:
    print "[%d] %s" % (counter,bucket.name)
    counter += 1

# Prompt for a bucket index and a mount point; Ctrl-C exits cleanly.
try:
    inp = int(raw_input('>>> Enter the bucket to mnt: '))
    selection = buckets[inp].name
    print 'you selected: %s' % selection
    mountpt = raw_input('>>> please enter the mnt point: ')
    print 'mounting %s at: %s' % (selection,mountpt)
except KeyboardInterrupt,e:
    print
    print 'Exiting...'
    sys.exit(1)

# Invoke s3fs with credentials from the config.
# NOTE(review): credentials passed on the command line are visible to
# other users via the process list.
try:
    os.system('s3fs %s -o accessKeyId=%s -o secretAccessKey=%s %s' % (selection,
                                                                      cfg.aws.get('aws_access_key_id'),
                                                                      cfg.aws.get('aws_secret_access_key'),mountpt))
Esempio n. 17
0
def cloneCluster(new_zone, cluster_tag=None, template=None):
    assert cluster_tag is not None or template is not None, \
            "You must provide a cluster template or a cluster tag"
    #get full config
    cfg = StarClusterConfig().load()
    cm = ClusterManager(cfg)
    if new_zone[-1] not in ['a', 'b', 'c', 'd']:
        raise Exception("%s does not appear to be a ZONE" % new_zone)
    dest_region = new_zone[:-1]
    if cluster_tag is not None:
        lookup = get_config(cluster_tag=cluster_tag)
        if lookup:
            template = lookup[0]['template_name']
            cluster_cfg = load_config(lookup[0]['config_path'])
        else:
            raise Exception("Unable to find cluster tag[%s]" % cluster_tag)
    else:
        lookup = get_config(template_name=template)
        if lookup:
            template = lookup[0]['template_name']
            cluster_cfg = load_config(lookup[0]['config_path'])
        else:
            raise Exception("Unable to find template[%s]" % cluster_tag)
    #get autogenerated portion of config
    auto_config = ConfigParser.RawConfigParser()
    auto_config.read(
        os.path.join(static.STARCLUSTER_CFG_DIR, 'cfgs/auto-gen.cfg'))
    new_cluster_sect = []
    new_cluster_sect.append(('KEYNAME', static.KEYNAMES[dest_region]))
    new_cluster_sect.append(('EXTENDS', template))
    new_volume_sect = []
    nit_v = ''
    if 'node_instance_types' in cluster_cfg:
        for nit in cluster_cfg['node_instance_types']:
            source_region = regionFromAMI(nit['image'])
            new_ami = cpAMI(dest_region=dest_region,
                            source_region=source_region,
                            source_image_id=nit['image'])
            if len(nit_v) > 0:
                nit_v += ', '
            if 'type' in nit:
                nit_v += ':'.join([nit['type'], new_ami, str(nit["size"])])
            else:
                nit_v += new_ami
    if 'node_instance_type' in cluster_cfg \
            and cluster_cfg['node_instance_type']:
        if len(nit_v) > 0:
            nit_v += ', '
        nit_v += cluster_cfg['node_instance_type']
        new_cluster_sect.append(('NODE_INSTANCE_TYPE', nit_v))
    print cluster_cfg
    if 'master_instance_type' in cluster_cfg \
                                    and cluster_cfg['master_instance_type']:
        new_cluster_sect.append(
            ('MASTER_INSTANCE_TYPE', cluster_cfg['master_instance_type']))
    if 'master_image_id' in cluster_cfg and cluster_cfg['master_image_id']:
        source_region = regionFromAMI(cluster_cfg['master_image_id'])
        new_ami = cpAMI(dest_region=dest_region,
                        source_region=source_region,
                        source_image_id=cluster_cfg['master_image_id'])
        new_cluster_sect.append(('MASTER_IMAGE_ID', new_ami))

    if 'node_image_id' in cluster_cfg and cluster_cfg['node_image_id']:
        source_region = regionFromAMI(cluster_cfg['node_image_id'])
        new_ami = cpAMI(dest_region=dest_region,
                        source_region=source_region,
                        source_image_id=cluster_cfg['node_image_id'])
        new_cluster_sect.append(('NODE_IMAGE_ID', new_ami))
    volumes_names = []
    if 'volumes' in cluster_cfg:
        for vname, vdesc in cluster_cfg['volumes'].iteritems():
            new_volume_sect = []
            vname = removeRegion(vname)
            volume_sec_name = 'volume %s-%s' % (vname, new_zone)
            counter = 0
            while auto_config.has_section(volume_sec_name):
                volume_sec_name = 'volume %s-%s-%02d' % (vname, new_zone,
                                                         counter)
                counter += 1
            region_from, zone_from = regionFromVolume(vdesc['volume_id'])
            if region_from is None:
                raise Exception("Cannot find %s  in any region." % v)
            new_vol_id = cpVolume(vdesc['volume_id'],
                                  new_zone[:-1],
                                  new_zone,
                                  region_from=region_from)
            new_volume_sect.append(('VOLUME_ID', new_vol_id))
            new_volume_sect.append(('MOUNT_PATH', vdesc['mount_path']))
            auto_config.add_section(volume_sec_name)
            for k, v in new_volume_sect:
                auto_config.set(volume_sec_name, k, v)
            volumes_names.append(volume_sec_name.split(' ')[1])
    if len(volumes_names) > 0:
        new_cluster_sect.append(('VOLUMES', ','.join(volumes_names)))
    counter = 0
    template = removeRegion(template)
    cluster_sec_name = 'cluster %s-%s' % (template, new_zone)
    while auto_config.has_section(cluster_sec_name):
        cluster_sec_name = 'cluster %s-%s-%02d' % (template, new_zone, counter)
        counter += 1
    auto_config.add_section(cluster_sec_name)
    for k, v in new_cluster_sect:
        auto_config.set(cluster_sec_name, k, v)
    with open(os.path.join(static.STARCLUSTER_CFG_DIR, 'cfgs/auto-gen.cfg'),
              'wb') as configfile:
        auto_config.write(configfile)
    log.info(("starcluster -r %s start --force-spot-master -b "
              "<my-bid> -c %s my-cluster") %
             (new_zone[:-1], cluster_sec_name.split(' ')[1]))
Esempio n. 18
0
#!/usr/bin/env python
# Interactive s3fs mounting helper (PEP 8-formatted variant): choose an
# S3 bucket and mount it using StarCluster config credentials.
# NOTE(review): Python 2 syntax; the final ``try`` block has no handler,
# so this snippet appears truncated.
import os
import sys

from starcluster.config import StarClusterConfig

print 'Simple wrapper script for s3fs (http://s3fs.googlecode.com/)'

# load() returns the config object, allowing the chained call.
cfg = StarClusterConfig().load()
ec2 = cfg.get_easy_ec2()
buckets = ec2.s3.get_buckets()
# List all buckets as a numbered menu.
counter = 0
for bucket in buckets:
    print "[%d] %s" % (counter, bucket.name)
    counter += 1

# Prompt for a bucket index and a mount point; Ctrl-C exits cleanly.
try:
    inp = int(raw_input('>>> Enter the bucket to mnt: '))
    selection = buckets[inp].name
    print 'you selected: %s' % selection
    mountpt = raw_input('>>> please enter the mnt point: ')
    print 'mounting %s at: %s' % (selection, mountpt)
except KeyboardInterrupt, e:
    print
    print 'Exiting...'
    sys.exit(1)

# Invoke s3fs with credentials from the config.
# NOTE(review): credentials on the command line are visible via ps.
try:
    os.system('s3fs %s -o accessKeyId=%s -o secretAccessKey=%s %s' %
              (selection, cfg.aws.get('aws_access_key_id'),
               cfg.aws.get('aws_secret_access_key'), mountpt))
Esempio n. 19
0
 def get_config(self, contents, cache=False):
     """Write ``contents`` to a temp file and return the loaded config.

     Bug fix: ``cache`` is now forwarded to StarClusterConfig; the
     original hard-coded ``cache=True`` and ignored the argument.
     """
     tmp_file = tempfile.NamedTemporaryFile()
     tmp_file.write(contents)
     tmp_file.flush()
     cfg = StarClusterConfig(tmp_file.name, cache=cache)
     cfg.load()
     return cfg
Esempio n. 20
0
 def get_config(self, contents, cache=False):
     """Materialize ``contents`` in a temp file and return the loaded
     StarClusterConfig (load() returns the config object)."""
     scratch = tempfile.NamedTemporaryFile()
     scratch.write(contents)
     scratch.flush()
     return StarClusterConfig(scratch.name, cache=cache).load()
Esempio n. 21
0
def writeconfig_flow(request):
    """Render a distributed flow-visualization image on the cluster.

    Pushes the POSTed world-wind config to every node, starts the CUDA
    flow renderer on each node, collects the per-GPU PNGs (plus the
    shared legend) over SSH, merges them into one image, and returns
    the merged filename as JSON.
    """
    if request.method == 'POST':
        config_txt = request.POST["configtxt"]
        root_path = STORAGE_ROOTPATH
        wwconfig_path = root_path + "wwconfig.txt"
        with open(wwconfig_path, 'wb') as f:
            f.write(config_txt)

        # Load the previously generated StarCluster config and grab the
        # running cluster's template.
        conf_new_path = root_path + "config"
        cfg = StarClusterConfig(conf_new_path)
        cfg.load()
        cluster = cfg.get_cluster_template("geovizcluster")

        tasknum = int(request.POST["tasknum"])
        # NOTE(review): gpunum is a float (math.sqrt); used below in
        # integer arithmetic and %d formatting -- presumably tasknum is
        # always a perfect square, confirm against callers.
        gpunum = math.sqrt(tasknum)
        datafile = "data/" + request.POST["datafile"]
        varname1, varname2 = "uh", "vh"
        if request.POST["varnameflow"] and request.POST["varnameflow"] != "":
            varnameflow = request.POST["varnameflow"].split(",")
            varname1, varname2 = varnameflow[0], varnameflow[1]
        tseq = 8
        if request.POST["tseq"] and request.POST["tseq"] != "":
            # Bug fix: the original assigned this value to ``curtime``
            # (a leftover from the ray-casting view), so a POSTed tseq
            # never reached the renderer command below.
            tseq = int(request.POST["tseq"])
        colorselect = "ocean"
        if request.POST["colorselect"] and request.POST["colorselect"] != "":
            colorselect = request.POST["colorselect"]
        totalclass = 20
        if request.POST["totalclass"] and request.POST["totalclass"] != "":
            totalclass = int(request.POST["totalclass"])

        # Tile dimensions: the w x h canvas is split per GPU.
        w, h = int(request.POST["wwwidth"]), int(request.POST["wwheight"])
        wdiv, hdiv = w / gpunum, h / gpunum

        # The remote layout is identical on every node; compute it once
        # instead of relying on loop-variable leakage.
        remote_root_path = "/home/ubuntu/CollabViz/"
        remote_img_path = remote_root_path + "image/"
        remote_config_path = remote_root_path + "wwconfig.txt"

        img_list = []

        for index, node in enumerate(cluster.nodes):
            # Ship the rendering config to the node.
            node.ssh.switch_user("ubuntu")
            node.ssh.put(wwconfig_path, remote_config_path)

            # Filenames this node will produce: one PNG per GPU task it
            # owns, plus the shared legend image.
            tmp_imgs = []
            for i in range(index * tasknum / gpunum,
                           (index + 1) * tasknum / gpunum):
                tmp_imgs.append("flowgpu%d.png" % i)
            tmp_imgs.append("legend.png")
            img_list.append(tmp_imgs)

            # Launch the renderer detached so all nodes work in parallel.
            cmd = "sudo DISPLAY=:1 /home/ubuntu/anaconda/bin/python %s%s %d %d %d %s %s %s %d %s %d" % (
                remote_root_path, "NetCDFCUDAFlow.py", tasknum, gpunum, index,
                datafile, varname1, varname2, tseq, colorselect, totalclass)
            node.ssh.execute(cmd, source_profile=False, detach=True)

        # Busy-wait for every expected image, download it, then remove
        # the remote copy.
        local_img_path = root_path + "img/sub/"
        for index, node in enumerate(cluster.nodes):
            for img in img_list[index]:
                img_get = False
                img_get_path = remote_img_path + img
                while not img_get:
                    if node.ssh.isfile(img_get_path):
                        node.ssh.get(img_get_path, local_img_path + img)
                        img_get = True
                        cmd = "sudo rm %s" % img_get_path
                        node.ssh.execute(cmd,
                                         source_profile=False,
                                         detach=True)

        # Merge the per-GPU tiles into the final image.
        ncvrimage = NCVRImage(filepath=local_img_path)
        # Bug fix: the original format string repeated %H ('%H%H'),
        # duplicating the hour in the generated filename.
        final_flow_img_file = "flow_%s.png" % datetime.datetime.now().strftime(
            '%Y%m%d%H%M%S')
        final_flow_img_file_path = root_path + "img/" + final_flow_img_file
        ncvrimage.MergeImageFromFolder(local_img_path,
                                       final_flow_img_file_path, w, h)

        response_data = {"newimgfile": final_flow_img_file}
        return HttpResponse(json.dumps(response_data),
                            content_type="application/json")