Example no. 1
def removeDecommissionedMachine(slaveName=None):
    '''
    Destroys a decommissioned machine from the cluster and removes its traces from the excludes and slaves files
    INPUT: String slaveName (optional)
    OUTPUT: boolean (True if successful, False otherwise)
    '''
    util.debug_print('calling downsize removeDecommissionedMachine()')

    # if not set, then get from excludes list
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(
            config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)

        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print(
                'no slaveName passed in as argument AND the excludes file is empty!'
            )
            return False

    # remove that slavename from excludes
    remove_line = slaveName + "\n"
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine=False)

    # remove that name from slaves file
    util.debug_print('removing from slaves file the line: ' + str(remove_line))
    update_slaves = util.updateFile('slaves', remove_line, addLine=False)

    # get vmid from slaveName
    vmid = util.get_vm_id_by_name(slaveName)

    # NOW destroy vm
    util.debug_print('Now we will be trying to destroy the machine with ID: ' +
                     str(vmid))
    result = api.destroyVirtualMachine({'id': vmid})

    util.debug_print(
        'waiting for the VM destroy job to finish')
    waitResult = util.waitForAsync(result.get('jobid'))

    # since we destroyed the vm, we can remove from master's /etc/hosts file
    hosts = util.get_file_content(config.DEFAULT_DESTINATION_HOSTS_FILENAME)
    checker = re.compile('.*' + slaveName + '\n')
    to_be_removed_hosts_line = [
        line for line in hosts if checker.match(line) is not None
    ]
    util.debug_print('remove line:' + str(to_be_removed_hosts_line) +
                     ' from /etc/hosts file')
    util.updateFile('hosts', to_be_removed_hosts_line[0], addLine=False)

    util.debug_print('Done destroying VM.')
    return True
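The util.updateFile helper used throughout these cluster-management examples is not shown in the source. A minimal sketch of the behaviour the calls above appear to assume (the signature, the name-to-path mapping config.MANAGED_FILES, and the return value are assumptions for illustration, not the project's actual implementation):

def updateFile(name, line, addLine=True):
    # Hypothetical sketch: append a line to, or remove a line from, one of the
    # managed config files ('excludes', 'slaves', 'hosts') on the master.
    path = config.MANAGED_FILES[name]  # assumed mapping from short name to file path
    with open(path) as f:
        lines = f.readlines()
    if addLine:
        if line not in lines:
            lines.append(line)
    else:
        lines = [l for l in lines if l != line]
    with open(path, 'w') as f:
        f.writelines(lines)
    return True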
Example no. 2
def decommission(also_stop_vm=True):
    '''
    This function copies a slave name from the slaves list to the excludes list and runs the refresh scripts
    Input: boolean also_stop_vm (optional)
    Output: None (returns False if the cluster is already at the minimum size)
    '''
    util.debug_print('Trying to decommission')

    # get all slave names in slaves file
    all_slave_names = map(
        str.strip,
        util.get_file_content(config.DEFAULT_DESTINATION_SLAVES_FILENAME))
    util.debug_print('all_slave_names:')
    util.debug_print(all_slave_names)

    # get excludes content from master
    excludes_list = map(
        str.strip,
        util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME))
    util.debug_print('current excludes list:')
    util.debug_print(excludes_list)

    # basic sanity check to see if we should try to decommission
    remaining_slaves = len(all_slave_names) - len(excludes_list)
    if remaining_slaves <= config.MINIMUM_DATANODE_SIZE:
        util.debug_print('We have reached the minimum cluster size of ' +
                         str(remaining_slaves) + ', skipping decommissioning.')
        return False

    # ok, now we know we can remove some
    removable_slaves = list(set(all_slave_names) - set(excludes_list))
    max_name = get_max_slavename(removable_slaves, return_all=False)
    util.debug_print('next slavename to remove is: ' + max_name)

    # ok, now we have the slave we want to decommission, update the excludes file
    newLine = max_name + "\n"
    util.updateFile('excludes', newLine)

    # run commands on the master that will make the decommission happen
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)

    util.debug_print('trying to hdfs dfsadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command(
        'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"'
    )

    util.debug_print('trying to yarn rmadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command(
        'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"'
    )

    if also_stop_vm:
        stopDecommissionedMachine(max_name)
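get_max_slavename() is called above but not defined in these snippets. A plausible sketch, under the assumption that slave names carry a numeric suffix (hypothetical, for illustration only):

import re

def get_max_slavename(names, return_all=False):
    # Hypothetical helper: order slave names by their numeric suffix,
    # e.g. 'slave-12' sorts after 'slave-3'.
    def suffix(name):
        m = re.search(r'(\d+)$', name)
        return int(m.group(1)) if m else -1
    ordered = sorted(names, key=suffix)
    return ordered if return_all else ordered[-1]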
Example no. 3
def removeDecommissionedMachine(slaveName = None):
    '''
    Destroys a decommissioned machine from the cluster and removes its traces from the excludes and slaves files
    INPUT: String slaveName (optional)
    OUTPUT: boolean (True if successful, False otherwise)
    '''
    util.debug_print('calling downsize removeDecommissionedMachine()')
    
    # if not set, then get from excludes list
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)
        
        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print('no slaveName passed in as argument AND the excludes file is empty!')
            return False
        
    # remove that slavename from excludes
    remove_line = slaveName + "\n"
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine = False)
        
    # remove that name from slaves file
    util.debug_print('removing from slaves file the line: ' + str(remove_line))
    update_slaves = util.updateFile('slaves', remove_line, addLine = False)
    
    # get vmid from slaveName
    vmid = util.get_vm_id_by_name(slaveName)
    
    # NOW destroy vm
    util.debug_print('Now we will be trying to destroy the machine with ID: ' + str(vmid))
    result = api.destroyVirtualMachine({'id': vmid})
    
    util.debug_print('waiting for the VM destroy job to finish')
    waitResult = util.waitForAsync(result.get('jobid'))
    
    # since we destroyed the vm, we can remove from master's /etc/hosts file
    hosts = util.get_file_content(config.DEFAULT_DESTINATION_HOSTS_FILENAME)
    checker = re.compile('.*' + slaveName + '\n')
    to_be_removed_hosts_line = [line for line in hosts if checker.match(line) is not None]
    util.debug_print('remove line:' + str(to_be_removed_hosts_line) + ' from /etc/hosts file')
    util.updateFile('hosts', to_be_removed_hosts_line[0], addLine = False)

    util.debug_print('Done destroying VM.')    
    return True
Example no. 4
def decommission(also_stop_vm = True):
    '''
    This function copies a slave name from the slaves list to the excludes list and runs the refresh scripts
    Input: boolean also_stop_vm (optional)
    Output: None (returns False if the cluster is already at the minimum size)
    '''
    util.debug_print('Trying to decommission')
    
    # get all slave names in slaves file
    all_slave_names = map(str.strip, util.get_file_content(config.DEFAULT_DESTINATION_SLAVES_FILENAME))
    util.debug_print('all_slave_names:')
    util.debug_print(all_slave_names)
    
    # get excludes content from master
    excludes_list = map(str.strip, util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME))
    util.debug_print('current excludes list:')
    util.debug_print(excludes_list)
    
    # basic sanity check to see if we should try to decommission 
    remaining_slaves =  len(all_slave_names) - len(excludes_list)
    if remaining_slaves <= config.MINIMUM_DATANODE_SIZE:
        util.debug_print('We have reached the minimum cluster size of ' + str(remaining_slaves) + ', skipping decommissioning.')
        return False
    
    # ok, now we know we can remove some 
    removable_slaves = list(set(all_slave_names) - set(excludes_list))
    max_name = get_max_slavename(removable_slaves, return_all=False)
    util.debug_print('next slavename to remove is: ' + max_name)
    
    # ok, now we have the slave we want to decommission, update the excludes file
    newLine = max_name + "\n"
    util.updateFile('excludes', newLine)

    # run commands on the master that will make the decommission happen
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
    
    util.debug_print('trying to hdfs dfsadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"')

    util.debug_print('trying to yarn rmadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"')
    
    if also_stop_vm:
        stopDecommissionedMachine(max_name)
Example no. 5
    def save(self, filename):
        """Saves the content to the specified filename.

        @param filename is the path and filename to the
          'module.sources' file to save.

        If the filename already exists and saved content is the same as the
        filename then it will not update file. This avoids unecessary updates
        to files used in build systems.

        Return True if the file was updated, False otherwise.
        """
        out = ""
        abs_root = os.path.normpath(os.path.dirname(os.path.abspath(filename)))
        def make_relative(fname):
            fname = os.path.normpath(os.path.abspath(fname))
            if fname.startswith(abs_root):
                fname = fname[len(abs_root)+1:]
            else:
                fname = fname
            return fname.replace(os.path.sep, "/")

        all = set(self.__plain_sources.all())
        pch = set(self.__plain_sources.pch())
        nopch = set(self.__plain_sources.nopch())
        pch_system_includes = set(self.__plain_sources.pch_system_includes())
        pch_jumbo = set(self.__plain_sources.pch_jumbo())

        jumbo_units = self.__jumbo_compile_units
        for jumbo_unit in jumbo_units.itervalues():
            out += "# [pch=%d;system_includes=%d;jumbo=%s]\n" % (int(jumbo_unit.pch()), int(jumbo_unit.system_includes()), jumbo_unit.name())
            for fname in jumbo_unit.source_files():
                options = ""
                if jumbo_unit.source_file_options(fname):
                    options = " # [%s]" % ",".join(map(lambda (k, v): '%s=%s' % (k, v),
                                                       zip(jumbo_unit.source_file_options(fname).keys(),
                                                           jumbo_unit.source_file_options(fname).values())))
                out += make_relative(fname) + options + "\n"
                for src_set in (all, pch, nopch, pch_system_includes, pch_jumbo):
                    src_set.discard(fname)

        plain = all - pch - nopch - pch_system_includes - pch_jumbo

        if pch_jumbo:
            out += "# [pch=1;jumbo=1]\n"
            for fname in pch_jumbo:
                out += make_relative(fname) + "\n"
        if pch:
            out += "# [pch=1]\n"
            for fname in pch:
                out += make_relative(fname) + "\n"
        if nopch:
            out += "# [pch=0]\n"
            for fname in nopch:
                out += make_relative(fname) + "\n"
        if pch_system_includes:
            out += "# [pch=1;system_includes=0]\n"
            for fname in pch_system_includes:
                out += make_relative(fname) + "\n"
        if plain:
            out += "# [pch=0;jumbo=0]\n"
            for fname in plain:
                out += make_relative(fname) + "\n"

        output = StringIO.StringIO()
        output.write(out)
        return util.updateFile(output, filename)
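save() delegates the actual write to util.updateFile(output, filename). The docstring above implies a compare-before-write contract; a minimal sketch of that contract (an assumption for illustration, not the project's real implementation):

def updateFile(stream, filename):
    # Sketch: only rewrite the file when its content would change, and
    # report whether an update happened (True) or was skipped (False).
    new_content = stream.getvalue()
    try:
        with open(filename) as f:
            if f.read() == new_content:
                return False  # unchanged; leave build timestamps alone
    except IOError:
        pass  # file does not exist yet
    with open(filename, "w") as f:
        f.write(new_content)
    return True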
Example no. 6
def updateFile(list, filename):
    content = StringIO.StringIO()
    for f in list:
        print >>content, f
    return util.updateFile(content, filename)
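For example, the wrapper above can emit a plain list of source paths, one per line, and it inherits util.updateFile's behaviour of leaving the target untouched when the content is unchanged (the file names here are made up for illustration):

sources = ["generated/sqlite_main.c", "generated/sqlite_util.c"]
updateFile(sources, "module.sources")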
Example no. 7
    def __call__(self, sourceRoot, outputRoot=None, quiet="yes"):
        self.startTiming()
        if outputRoot is None: outputRoot = sourceRoot

        # File names
        css_properties_txt = os.path.join(sourceRoot, "modules", "style",
                                          "src", "css_properties.txt")
        css_aliases_template_h = os.path.join(sourceRoot, "modules", "style",
                                              "src", "css_aliases_template.h")
        css_aliases_h = os.path.join(outputRoot, "modules", "style", "src",
                                     "css_aliases.h")
        css_property_strings_template_h = os.path.join(
            sourceRoot, "modules", "style", "src",
            "css_property_strings_template.h")
        css_property_strings_h = os.path.join(outputRoot, "modules", "style",
                                              "src", "css_property_strings.h")
        css_properties_template_h = os.path.join(sourceRoot, "modules",
                                                 "style", "src",
                                                 "css_properties_template.h")
        css_properties_h = os.path.join(outputRoot, "modules", "style", "src",
                                        "css_properties.h")
        css_properties_internal_txt = os.path.join(
            sourceRoot, "modules", "style", "src",
            "css_properties_internal.txt")
        atoms_txt = os.path.join(sourceRoot, "modules", "dom", "src",
                                 "atoms.txt")

        # Read the property names from css_properties.txt into a set, with
        # their aliases in a dictionary, and the properties which are not
        # aliases into another set
        properties = set([])
        non_alias_properties = set([])
        aliases = dict({})
        try:
            f = None
            util.fileTracker.addInput(css_properties_txt)
            f = open(css_properties_txt)
            for line in f.read().split("\n"):
                if line:
                    props = line.split(",")
                    for p in props:
                        p = p.strip()
                        if p not in properties:
                            properties.add(p)
                        else:
                            self.error(
                                "Error: css property '%s' declared multiple times."
                                % p)
                            return self.endTiming(1, quiet=quiet)
                    name = props[0].strip()
                    non_alias_properties.add(name)
                    if len(props) >= 2:
                        for a in props[1:]:
                            a = a.strip()
                            aliases.setdefault(name, []).append(a)
        finally:
            if f: f.close()
        sorted_properties = sorted(properties, cmp=sort_properties)

        # Read internal property names from css_properties_internal.txt into a
        # set
        internal_properties = set([])
        try:
            f = None
            util.fileTracker.addInput(css_properties_internal_txt)
            f = open(css_properties_internal_txt)
            for line in f.read().split("\n"):
                p = line.strip()
                if p:
                    if p not in internal_properties and p not in properties:
                        internal_properties.add(p)
                    else:
                        self.error(
                            "Error: css property '%s' declared multiple times."
                            % p)
                        return self.endTiming(1, quiet=quiet)
        finally:
            if f: f.close()
        sorted_internal_properties = sorted(internal_properties,
                                            cmp=sort_properties)

        # Regenerate css_properties.txt, with the sorted non-alias properties
        # each followed by their aliases:
        output = StringIO.StringIO()
        for p in sorted(non_alias_properties, cmp=sort_properties):
            if p in aliases:
                output.write("%s, %s\n" % (p, ", ".join(sorted(aliases[p]))))
            else:
                output.write("%s\n" % p)
        changed = util.updateFile(output, css_properties_txt)

        # Create css_aliases.h from its template:
        changed = util.readTemplate(
            css_aliases_template_h, css_aliases_h,
            CssAliasesTemplateActionHandler(aliases)) or changed

        # Create css_property_strings.h from its template:
        changed = util.readTemplate(
            css_property_strings_template_h, css_property_strings_h,
            PropertyStringsTemplateActionHandler(sorted_properties)) or changed

        # Create css_properties.h from its template:
        changed = util.readTemplate(
            css_properties_template_h, css_properties_h,
            PropertiesTemplateActionHandler(
                sorted_properties, sorted_internal_properties)) or changed

        # Check that all properties are also used in the atoms.txt file in dom.
        try:
            f = None
            util.fileTracker.addInput(atoms_txt)
            f = open(atoms_txt, "r")
            atoms = f.read().lower()
            for p in properties:
                if re.sub("-", "", p.lower()) not in atoms:
                    self.error(
                        "Warning: %s is missing from modules/dom/src/atoms.txt"
                        % p)
        finally:
            if f: f.close()

        if changed: result = 2
        else: result = 0
        return self.endTiming(result, quiet=quiet)
Example no. 8
        if module_sources.cpp(source_file):
            extension = "cpp"
        original_filename = sourcename[6:] + "." + extension
    else:
        extension = "c"
        filename = "opera_" + sourcename + ".cpp"
    file_path[-1] = filename

    options = module_sources.getSourceOptionString(source_file)
    if len(options) == 0:
        module_sources_out.write("%s\n" % '/'.join(file_path))
    else:
        module_sources_out.write("%s # [%s]\n" %
                                 ('/'.join(file_path).ljust(34), options))
    if util.readTemplate(
            wrapper_template, os.path.join(libopeayDir, *file_path),
            HandleTemplateAction(
                extension == "cpp", module_sources, source_file,
                "/".join(file_path[0:-1] + [original_filename]))):
        changed_files.append(os.path.join(*file_path))

if util.updateFile(module_sources_out,
                   os.path.join(libopeayDir, "module.sources")):
    print "module.sources updated"
else:
    print "module.sources not changed"
if len(changed_files):
    print "%d wrapper files updated:" % len(changed_files), changed_files
else:
    print "No wrapper file changed"
Example no. 9
def recommission(slaveName=None):
    '''
    This function recommissions a slave, i.e. removes it from the excludes list and runs the scripts needed to add it back as a node
    Input: String slaveName (optional)
    Output: None (returns False on error)
    '''
    util.debug_print('calling upsize recommission()')

    # if not set, then get from excludes list
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(
            config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)

        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print(
                'no slaveName passed in as argument AND the excludes file is empty!'
            )
            return False

    # remove that slavename from excludes
    remove_line = slaveName + "\n"
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine=False)

    # confirm if VM is running or stopped or whatever
    vmid = util.get_vm_id_by_name(slaveName)
    raw_result = api.listVirtualMachines({'id': vmid})
    result = raw_result.get('virtualmachine')[0]
    ipaddr = result.get('nic')[0].get('ipaddress')

    while True:
        current_state = result.get('state')
        if current_state == 'Running':
            util.debug_print('Machine is currently running')

            util.debug_print('trying to hdfs dfsadmin -refreshNodes')
            ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
            outmsg, errmsg = ssh.sudo_command(
                'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"'
            )

            util.debug_print('trying to yarn rmadmin -refreshNodes')
            outmsg, errmsg = ssh.sudo_command(
                'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"'
            )

            break
        elif current_state == 'Stopped':
            util.debug_print('Machine is currently Stopped')

            # start up machine and wait till it finishes starting up
            util.debug_print('Trying to start VM')
            result = api.startVirtualMachine({'id': vmid})

            # now we wait for the async startVirtualMachine() to finish
            waitResult = util.waitForAsync(result.get('jobid'))
            if waitResult != True:
                # whoops something went wrong!
                return waitResult

            # SSHWrapper retries until it can connect, so use it here to wait
            # for the node to come back up. Note: the startVirtualMachine result
            # does not include the NIC details, which is why the IP address was
            # captured earlier from listVirtualMachines.
            sshWaiting = SSHWrapper.SSHWrapper(ipaddr)

            util.debug_print('trying to start-dfs.sh')
            ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
            ssh.sudo_command(
                'sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-dfs.sh"'
            )

            util.debug_print('trying to run start-yarn.sh')
            ssh.sudo_command(
                'sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-yarn.sh"'
            )

            util.debug_print('trying to hdfs dfsadmin -refreshNodes')
            ssh.sudo_command(
                'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"'
            )

            util.debug_print('trying to yarn rmadmin -refreshNodes')
            ssh.sudo_command(
                'sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"'
            )

            break
        elif current_state == 'Stopping' or current_state == 'Starting':
            util.debug_print(
                'OK, currently changing state, let us just call listVirtualMachines again and see.'
            )
            raw_result = api.listVirtualMachines({'id': vmid})
            result = raw_result.get('virtualmachine')[0]
        else:
            # something went wrong here!
            util.debug_print('ok, it is in an unexpected state: ' +
                             str(current_state))
            return False
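Both recommission() and the destroy/deploy paths block on util.waitForAsync(jobid). A rough sketch of what such a helper might do against the CloudStack async-job API (hypothetical; the real helper and its return values are not shown in the source):

import time

def waitForAsync(jobid):
    # Poll CloudStack's queryAsyncJobResult until the job leaves the pending
    # state; return True on success, the raw result on failure.
    while True:
        status = api.queryAsyncJobResult({'jobid': jobid})
        if status.get('jobstatus') == 1:   # completed successfully
            return True
        if status.get('jobstatus') == 2:   # failed
            return status
        time.sleep(5)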
Example no. 10
def upsize():
    util.debug_print('calling upsize()')
    # now we need to get the next slave name
    util.debug_print('Trying to get slave name....') 
    next_name = getNextSlaveName()
    if not next_name:
        pp.pprint('Getting the next name failed.')
        return -1

    util.debug_print('slavename is: '+str(next_name))

    # try to deploy with new slave with correct name
    util.debug_print('Trying to deploy VM')
    result = api.deployVirtualMachine({ 'serviceofferingid': config.SERVICEOFFERINGID,
                                        'zoneid': config.ZONEID,
                                        'templateid': config.TEMPLATEID,
                                        'displayname': next_name,
                                        'name': next_name })

    # now we wait for the async deployVirtualMachine() to finish
    waitResult = util.waitForAsync(result.get('jobid'))
    if waitResult != True:
        # whoops something went wrong!
        return waitResult

    # now check if deploy worked
    if 'errortext' in result:
        # oh man... failed!
        pp.pprint(result)
        return
    
    util.debug_print('OK, just created the VM')

    ''' now get ip of new machine ''' 
    # get info for newly generated machine
    result2 = api.listVirtualMachines({'id': result.get("id") })
    ip = result2.get('virtualmachine')[0].get('nic')[0].get('ipaddress')
    util.debug_print('IP of new machine is: '+str(ip))
    
    # clear out datanode
    dataNodessh = SSHWrapper.SSHWrapper(ip)
    dataNodessh.sudo_command('sudo -S su hduser -c "rm -rf /home/hduser/hadoop-tmp/hdfs/datanode/*"')
    
    # change the hostname
    util.update_hostname(ip, next_name, result.get("id"))
    
    # new line for /etc/hosts file
    new_hosts_line =  str(ip) + '\t' + str(next_name) + '\n'

    # update hosts file on master
    util.debug_print('Trying to update hosts file with line: ' + new_hosts_line)
    util.updateFile('hosts', new_hosts_line)
    
    # update slaves file on master
    util.debug_print('Trying to update slaves file with name:' + next_name)
    util.updateFile('slaves', next_name + "\n")
    
    # now to start the start-dfs.sh and start-yarn.sh
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)

    util.debug_print('trying to start-dfs.sh')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-dfs.sh"')
    util.debug_print(outmsg)
    
    util.debug_print('trying to run start-yarn.sh')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-yarn.sh"')
    util.debug_print(outmsg)
    
    util.debug_print('DONE!')
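getNextSlaveName() is referenced above but not defined in these snippets. One plausible sketch, assuming slave names share a common prefix with a numeric suffix (config.SLAVE_NAME_PREFIX and the overall logic are assumptions for illustration):

import re

def getNextSlaveName():
    # Hypothetical: find the highest numeric suffix in the slaves file and
    # return a name one past it, e.g. 'slave-7' when 'slave-6' is the highest.
    names = map(str.strip, util.get_file_content(config.DEFAULT_DESTINATION_SLAVES_FILENAME))
    numbers = [int(m.group(1)) for m in
               (re.search(r'(\d+)$', n) for n in names) if m]
    next_number = (max(numbers) + 1) if numbers else 1
    return config.SLAVE_NAME_PREFIX + str(next_number)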
Example no. 11
def recommission(slaveName = None):
    '''
    This function recommissions a slave, i.e. removes it from the excludes list and runs the scripts needed to add it back as a node
    Input: String slaveName (optional)
    Output: None (returns False on error)
    '''
    util.debug_print('calling upsize recommission()')
    
    # if not set, then get from excludes list
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)
        
        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print('no slaveName passed in as argument AND the excludes file is empty!')
            return False
        
    # remove that slavename from excludes
    remove_line = slaveName + "\n"
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine = False)
    
    # confirm if VM is running or stopped or whatever
    vmid = util.get_vm_id_by_name(slaveName)
    raw_result = api.listVirtualMachines({'id':vmid})
    result = raw_result.get('virtualmachine')[0]
    ipaddr = result.get('nic')[0].get('ipaddress')
    
    while True:
        current_state = result.get('state')
        if current_state == 'Running':
            util.debug_print('Machine is currently running')
            
            util.debug_print('trying to hdfs dfsadmin -refreshNodes')
            ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
            outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"')
        
            util.debug_print('trying to yarn rmadmin -refreshNodes')
            outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"')
            
            
            break
        elif current_state == 'Stopped':
            util.debug_print('Machine is currently Stopped')
            
            # start up machine and wait till it finishes starting up
            util.debug_print('Trying to start VM')
            result = api.startVirtualMachine({'id': vmid})
        
            # now we wait for the async startVirtualMachine() to finish
            waitResult = util.waitForAsync(result.get('jobid'))
            if waitResult != True:
                # whoops something went wrong!
                return waitResult
            
            # SSHWrapper retries until it can connect, so use it here to wait
            # for the node to come back up. Note: the startVirtualMachine result
            # does not include the NIC details, which is why the IP address was
            # captured earlier from listVirtualMachines.
            sshWaiting = SSHWrapper.SSHWrapper(ipaddr)
            
            util.debug_print('trying to start-dfs.sh')
            ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
            ssh.sudo_command('sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-dfs.sh"')
            
            util.debug_print('trying to run start-yarn.sh')
            ssh.sudo_command('sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-yarn.sh"')
            
            util.debug_print('trying to hdfs dfsadmin -refreshNodes')
            ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"')
        
            util.debug_print('trying to yarn rmadmin -refreshNodes')
            ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"')
            
            break
        elif current_state == 'Stopping' or current_state == 'Starting':
            util.debug_print('OK, currently changing state, let us just call listVirtualMachines again and see.')
            raw_result = api.listVirtualMachines({'id':vmid})
            result = raw_result.get('virtualmachine')[0]
        else:
            # something went wrong here!
            util.debug_print('ok, it is in an unexpected state: ' + str(current_state))
            return False
Example no. 12
    def save(self, filename):
        """Saves the content to the specified filename.

        @param filename is the path and filename to the
          'module.sources' file to save.

        If the file already exists and its saved content is the same as the
        new content, the file will not be updated. This avoids unnecessary
        updates to files used in build systems.

        Return True if the file was updated, False otherwise.
        """
        out = ""
        abs_root = os.path.normpath(os.path.dirname(os.path.abspath(filename)))

        def make_relative(fname):
            fname = os.path.normpath(os.path.abspath(fname))
            if fname.startswith(abs_root):
                fname = fname[len(abs_root) + 1:]
            else:
                fname = fname
            return fname.replace(os.path.sep, "/")

        all = set(self.__plain_sources.all())
        pch = set(self.__plain_sources.pch())
        nopch = set(self.__plain_sources.nopch())
        pch_system_includes = set(self.__plain_sources.pch_system_includes())
        pch_jumbo = set(self.__plain_sources.pch_jumbo())

        jumbo_units = self.__jumbo_compile_units
        for jumbo_unit in jumbo_units.itervalues():
            out += "# [pch=%d;system_includes=%d;jumbo=%s]\n" % (int(
                jumbo_unit.pch()), int(
                    jumbo_unit.system_includes()), jumbo_unit.name())
            for fname in jumbo_unit.source_files():
                options = ""
                if jumbo_unit.source_file_options(fname):
                    options = " # [%s]" % ",".join(
                        map(
                            lambda (k, v): '%s=%s' % (k, v),
                            zip(
                                jumbo_unit.source_file_options(fname).keys(),
                                jumbo_unit.source_file_options(
                                    fname).values())))
                out += make_relative(fname) + options + "\n"
                for src_set in (all, pch, nopch, pch_system_includes,
                                pch_jumbo):
                    src_set.discard(fname)

        plain = all - pch - nopch - pch_system_includes - pch_jumbo

        if pch_jumbo:
            out += "# [pch=1;jumbo=1]\n"
            for fname in pch_jumbo:
                out += make_relative(fname) + "\n"
        if pch:
            out += "# [pch=1]\n"
            for fname in pch:
                out += make_relative(fname) + "\n"
        if nopch:
            out += "# [pch=0]\n"
            for fname in nopch:
                out += make_relative(fname) + "\n"
        if pch_system_includes:
            out += "# [pch=1;system_includes=0]\n"
            for fname in pch_system_includes:
                out += make_relative(fname) + "\n"
        if plain:
            out += "# [pch=0;jumbo=0]\n"
            for fname in plain:
                out += make_relative(fname) + "\n"

        output = StringIO.StringIO()
        output.write(out)
        return util.updateFile(output, filename)
Example no. 13
def updateFile(list, filename):
    content = StringIO.StringIO()
    for f in list:
        print >> content, f
    return util.updateFile(content, filename)
Example no. 14
def upsize():
    util.debug_print('calling upsize()')
    # now we need to get the next slave name
    util.debug_print('Trying to get slave name....')
    next_name = getNextSlaveName()
    if not next_name:
        pp.pprint('Getting the next name failed.')
        return -1

    util.debug_print('slavename is: ' + str(next_name))

    # try to deploy with new slave with correct name
    util.debug_print('Trying to deploy VM')
    result = api.deployVirtualMachine({
        'serviceofferingid': config.SERVICEOFFERINGID,
        'zoneid': config.ZONEID,
        'templateid': config.TEMPLATEID,
        'displayname': next_name,
        'name': next_name
    })

    # now we wait for the async deployVirtualMachine() to finish
    waitResult = util.waitForAsync(result.get('jobid'))
    if waitResult != True:
        # whoops something went wrong!
        return waitResult

    # now check if deploy worked
    if 'errortext' in result:
        # oh man... failed!
        pp.pprint(result)
        return

    util.debug_print('OK, just created the VM')
    ''' now get ip of new machine '''
    # get info for newly generated machine
    result2 = api.listVirtualMachines({'id': result.get("id")})
    ip = result2.get('virtualmachine')[0].get('nic')[0].get('ipaddress')
    util.debug_print('IP of new machine is: ' + str(ip))

    # clear out datanode
    dataNodessh = SSHWrapper.SSHWrapper(ip)
    dataNodessh.sudo_command(
        'sudo -S su hduser -c "rm -rf /home/hduser/hadoop-tmp/hdfs/datanode/*"'
    )

    # change the hostname
    util.update_hostname(ip, next_name, result.get("id"))

    # new line for /etc/hosts file
    new_hosts_line = str(ip) + '\t' + str(next_name) + '\n'

    # update hosts file on master
    util.debug_print('Trying to update hosts file with line: ' + new_hosts_line)
    util.updateFile('hosts', new_hosts_line)

    # update slaves file on master
    util.debug_print('Trying to update slaves file with name:' + next_name)
    util.updateFile('slaves', next_name + "\n")

    # now to start the start-dfs.sh and start-yarn.sh
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)

    util.debug_print('trying to start-dfs.sh')
    outmsg, errmsg = ssh.sudo_command(
        'sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-dfs.sh"'
    )
    util.debug_print(outmsg)

    util.debug_print('trying to run start-yarn.sh')
    outmsg, errmsg = ssh.sudo_command(
        'sudo -S su hduser -c "bash /home/hduser/hadoop-2.7.0/sbin/start-yarn.sh"'
    )
    util.debug_print(outmsg)

    util.debug_print('DONE!')
Example no. 15
module_sources.write("# Note: The generated source files needs to include several\n")
module_sources.write("# system header files. Thus it defines COMPILING_SQLITE before\n")
module_sources.write("# including core/pch_system_includes.h so the platform's system.h is\n")
module_sources.write("# able to prepare for that. So all files need options\n")
module_sources.write("# [no-jumbo;system_includes;no-pch]\n")

verbose("Scanning for .c files...")

# find the .c files
for filename in glob.glob(os.path.join(sqliteDir, "src", "*.c")):
    verbose(" Found %s" % relative_filename(filename), 1)
    basename = os.path.basename(filename)
    if basename not in exclude_files:
        wrapper_filename = os.path.join(sqliteDir, "generated", "sqlite_" + basename)
        verbose("  Wrapping %s into %s" % (relative_filename(filename), relative_filename(wrapper_filename)), 2)
        if util.readTemplate(wrapper_template, wrapper_filename,
                             HandleTemplateAction(basename)):
            verbose("  Updated %s" % relative_filename(wrapper_filename))
        else:
            verbose("  File %s not changed" % relative_filename(wrapper_filename), 2)
        module_sources.write("%s # [winnopch]\n" % '/'.join(["generated", "sqlite_" + basename]))
    else:
        verbose("  Excluded %s" % filename, 2)

verbose("Finished wrapping .c files.");

if (util.updateFile(module_sources, os.path.join(sqliteDir, "module.sources"))):
	verbose("Updated module.sources")
else:
	verbose("module.sources not changed")
Example no. 16
    def __call__(self, sourceRoot, outputRoot=None, quiet="yes"):
        self.startTiming()
        if outputRoot is None: outputRoot = sourceRoot

        # File names
        css_properties_txt = os.path.join(sourceRoot, "modules", "style", "src", "css_properties.txt")
        css_aliases_template_h = os.path.join(sourceRoot, "modules", "style", "src", "css_aliases_template.h")
        css_aliases_h = os.path.join(outputRoot, "modules", "style", "src", "css_aliases.h")
        css_property_strings_template_h = os.path.join(sourceRoot, "modules", "style", "src", "css_property_strings_template.h")
        css_property_strings_h = os.path.join(outputRoot, "modules", "style", "src", "css_property_strings.h")
        css_properties_template_h = os.path.join(sourceRoot, "modules", "style", "src", "css_properties_template.h")
        css_properties_h = os.path.join(outputRoot, "modules", "style", "src", "css_properties.h")
        css_properties_internal_txt = os.path.join(sourceRoot, "modules", "style", "src", "css_properties_internal.txt")
        atoms_txt = os.path.join(sourceRoot, "modules", "dom", "src", "atoms.txt")

        # Read the property names from css_properties.txt into a set, with
        # their aliases in a dictionary, and the properties which are not
        # aliases into another set
        properties = set([])
        non_alias_properties = set([])
        aliases = dict({})
        try:
            f = None
            util.fileTracker.addInput(css_properties_txt)
            f = open(css_properties_txt)
            for line in f.read().split("\n"):
                if line:
                    props = line.split(",")
                    for p in props:
                        p = p.strip()
                        if p not in properties:
                            properties.add(p)
                        else:
                            self.error("Error: css property '%s' declared multiple times." % p)
                            return self.endTiming(1, quiet=quiet)
                    name = props[0].strip()
                    non_alias_properties.add(name)
                    if len(props) >= 2:
                        for a in props[1:]:
                            a = a.strip()
                            aliases.setdefault(name, []).append(a)
        finally:
            if f: f.close()
        sorted_properties = sorted(properties, cmp=sort_properties)

        # Read internal property names from css_properties_internal.txt into a
        # set
        internal_properties = set([])
        try:
            f = None
            util.fileTracker.addInput(css_properties_internal_txt)
            f = open(css_properties_internal_txt)
            for line in f.read().split("\n"):
                p = line.strip()
                if p:
                    if p not in internal_properties and p not in properties:
                        internal_properties.add(p)
                    else:
                        self.error("Error: css property '%s' declared multiple times." % p)
                        return self.endTiming(1, quiet=quiet)
        finally:
            if f: f.close()
        sorted_internal_properties = sorted(internal_properties, cmp=sort_properties)

        # Regenerate css_properties.txt, with the sorted non-alias properties
        # each followed by their aliases:
        output = StringIO.StringIO()
        for p in sorted(non_alias_properties, cmp=sort_properties):
            if p in aliases:
                output.write("%s, %s\n" % (p, ", ".join(sorted(aliases[p]))) )
            else:
                output.write("%s\n" % p)
        changed = util.updateFile(output, css_properties_txt)

        # Create css_aliases.h from its template:
        changed = util.readTemplate(css_aliases_template_h, css_aliases_h,
                        CssAliasesTemplateActionHandler(aliases)) or changed

        # Create css_property_strings.h from its template:
        changed = util.readTemplate(css_property_strings_template_h, css_property_strings_h,
                        PropertyStringsTemplateActionHandler(sorted_properties)) or changed

        # Create css_properties.h from its template:
        changed = util.readTemplate(css_properties_template_h, css_properties_h,
                        PropertiesTemplateActionHandler(sorted_properties, sorted_internal_properties)) or changed

        # Check that all properties are also used in the atoms.txt file in dom.
        try:
            f = None
            util.fileTracker.addInput(atoms_txt)
            f = open(atoms_txt, "r")
            atoms = f.read().lower()
            for p in properties:
                if re.sub("-", "", p.lower()) not in atoms:
                    self.error("Warning: %s is missing from modules/dom/src/atoms.txt" % p)
        finally:
            if f: f.close()

        if changed: result = 2
        else: result = 0
        return self.endTiming(result, quiet=quiet)
Example no. 17
        if module_sources.cpp(source_file):
            extension = "cpp"
        original_filename = sourcename[6:] + "." + extension
    else:
        extension = "c"
        filename = "opera_" + sourcename + ".cpp"
    file_path[-1] = filename

    options = module_sources.getSourceOptionString(source_file)
    if len(options) == 0:
        module_sources_out.write("%s\n" % '/'.join(file_path))
    else:
        module_sources_out.write("%s # [%s]\n" % ('/'.join(file_path).ljust(34), options))
    if util.readTemplate(wrapper_template,
                         os.path.join(libopeayDir, *file_path),
                         HandleTemplateAction(extension=="cpp",
                                              module_sources,
                                              source_file,
                                              "/".join(file_path[0:-1] + [original_filename]))):
        changed_files.append(os.path.join(*file_path))

if util.updateFile(module_sources_out,
                   os.path.join(libopeayDir, "module.sources")):
    print "module.sources updated"
else:
    print "module.sources not changed"
if len(changed_files):
    print "%d wrapper files updated:" % len(changed_files), changed_files
else:
    print "No wrapper file changed"