All Samples(14)  |  Call(14)  |  Derive(0)  |  Import(0)

src/s/a/sahara-2014.1/sahara/plugins/intel/v3_0_2/installer.py   sahara
 
    services = []
    if u.get_namenode(cluster):
        services += ['hdfs']
 

src/s/a/sahara-2014.1/sahara/plugins/intel/v2_5_1/installer.py   sahara
 
    services = []
    if u.get_namenode(cluster):
        services += ['hdfs']
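Both Intel installer versions assemble the service list with the same probe-and-append pattern: check for each master process and enable the matching service. A minimal sketch of the fuller shape, assuming get_resourcemanager and get_oozie helpers like those used in the versionhandler samples below; only the 'hdfs' branch appears verbatim in these samples:

    services = []
    if u.get_namenode(cluster):
        services += ['hdfs']
    if u.get_resourcemanager(cluster):
        # assumed branch: enable YARN when a resourcemanager exists
        services += ['yarn']
    if u.get_oozie(cluster):
        # assumed branch: enable Oozie when an oozie server exists
        services += ['oozie']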
 

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v1_2_1/versionhandler.py   sahara
    def start_cluster(self, cluster):
        nn_instance = utils.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")
 
        LOG.info("Waiting %s datanodes to start up" % datanodes_count)
        with remote.get_remote(utils.get_namenode(cluster)) as r:
            while True:
                if run.check_datanodes_count(r, datanodes_count):
                    return

                decommission_tts = True

        nn = utils.get_namenode(cluster)
        jt = utils.get_jobtracker(cluster)
 
    def scale_cluster(self, cluster, instances):
        self._setup_instances(cluster, instances)
 
        run.refresh_nodes(remote.get_remote(
            utils.get_namenode(cluster)), "dfsadmin")

    def _set_cluster_info(self, cluster):
        nn = utils.get_namenode(cluster)
        jt = utils.get_jobtracker(cluster)
        oozie = utils.get_oozie(cluster)
        info = {}
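The info dict collected here typically maps each discovered master to its web UI endpoint. A hedged sketch of that shape, using stock Hadoop default ports rather than the config lookups the real handler performs:

    info = {}
    if jt:
        info['MapReduce'] = {
            # 50030 is the stock JobTracker web UI port; the real
            # handler reads the port from cluster configuration
            'Web UI': 'http://%s:50030' % jt.management_ip,
        }
    if nn:
        info['HDFS'] = {
            # 50070 is the stock NameNode web UI port
            'Web UI': 'http://%s:50070' % nn.management_ip,
        }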

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v2_3_0/versionhandler.py   sahara
    def start_cluster(self, cluster):
        nn = utils.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')
 
    def _set_cluster_info(self, cluster):
        nn = utils.get_namenode(cluster)
        rm = utils.get_resourcemanager(cluster)
        hs = utils.get_historyserver(cluster)
        oo = utils.get_oozie(cluster)

src/s/a/sahara-2014.1/sahara/plugins/intel/v3_0_2/versionhandler.py   sahara
    def _set_cluster_info(self, cluster):
        mng = u.get_instances(cluster, 'manager')[0]
        nn = u.get_namenode(cluster)
        jt = u.get_resourcemanager(cluster)
        oozie = u.get_oozie(cluster)

src/s/a/sahara-2014.1/sahara/plugins/intel/v2_5_1/versionhandler.py   sahara
    def _set_cluster_info(self, cluster):
        mng = u.get_instances(cluster, 'manager')[0]
        nn = u.get_namenode(cluster)
        jt = u.get_jobtracker(cluster)
        oozie = u.get_oozie(cluster)

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v1_2_1/config_helper.py   sahara
def generate_sahara_configs(cluster, node_group=None):
    nn_hostname = _get_hostname(utils.get_namenode(cluster))
    jt_hostname = _get_hostname(utils.get_jobtracker(cluster))
    oozie_hostname = _get_hostname(utils.get_oozie(cluster))
    hive_hostname = _get_hostname(utils.get_hiveserver(cluster))

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v2_3_0/run_scripts.py   sahara
def refresh_hadoop_nodes(cluster):
    nn = u.get_namenode(cluster)
    nn.remote().execute_command(
        'sudo su - -c "hdfs dfsadmin -refreshNodes" hadoop')
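refresh_hadoop_nodes is the scaling counterpart of the refresh_nodes call in the v1_2_1 sample above: it makes the namenode re-read its include/exclude files after the node set changes. A hedged usage sketch; update_exclude_files is a hypothetical helper, not part of these samples:

def decommission_datanodes(cluster, instances):
    # hypothetical helper that rewrites dfs.exclude on the namenode
    update_exclude_files(cluster, instances)
    # then ask HDFS to re-read it, via the function shown above
    refresh_hadoop_nodes(cluster)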
 

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v2_3_0/utils.py   sahara
def get_datanodes_status(cluster):
    statuses = {}
    namenode = u.get_namenode(cluster)
    status_regexp = r'^Hostname: (.*)\nDecommission Status : (.*)$'
    matcher = re.compile(status_regexp, re.MULTILINE)
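The sample cuts off before the matcher is applied. A plausible completion, assuming execute_command returns a (code, stdout) pair as in the refresh_hadoop_nodes sample above:

    # fetch the dfsadmin report from the namenode; [1] takes stdout
    dfs_report = namenode.remote().execute_command(
        'sudo su - -c "hdfs dfsadmin -report" hadoop')[1]
    # map each reported hostname to its lowercased decommission status
    for host, status in matcher.findall(dfs_report):
        statuses[host] = status.lower()
    return statuses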