Did I find the right examples for you? Yes / No      Crawl my project      Python Jobs

All Samples(10)  |  Call(10)  |  Derive(0)  |  Import(0)

src/s/a/sahara-2014.1/sahara/service/periodic.py   sahara(Download)
                timeutils.parse_isotime(cluster.updated_at))
            current_time = timeutils.utcnow()
            spacing = timeutils.delta_seconds(cluster_updated_at, current_time)
            if spacing < CONF.min_transient_cluster_active_time:
                continue

src/s/a/sahara-HEAD/sahara/service/periodic.py   sahara(Download)
                timeutils.parse_isotime(cluster.updated_at))
            current_time = timeutils.utcnow()
            spacing = timeutils.delta_seconds(cluster_updated_at, current_time)
            if spacing < CONF.min_transient_cluster_active_time:
                continue

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v2_3_0/scaling.py   sahara(Download)
def _check_decommission(cluster, instances, check_func, timeout):
    s_time = timeutils.utcnow()
    while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
        statuses = check_func(cluster)
        dec_ok = True

src/s/a/sahara-2014.1/sahara/plugins/vanilla/v1_2_1/scaling.py   sahara(Download)
        all_found = False
 
        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")

src/s/a/sahara-HEAD/sahara/plugins/vanilla/v2_3_0/scaling.py   sahara(Download)
def _check_decommission(cluster, instances, check_func, timeout):
    s_time = timeutils.utcnow()
    while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
        statuses = check_func(cluster)
        dec_ok = True

src/s/a/sahara-HEAD/sahara/plugins/vanilla/v1_2_1/scaling.py   sahara(Download)
        all_found = False
 
        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")

src/s/a/sahara-2014.1/sahara/openstack/common/loopingcall.py   sahara(Download)
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %

src/s/a/sahara-HEAD/sahara/openstack/common/loopingcall.py   sahara(Download)
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %

src/s/a/sahara-2014.1/sahara/tests/integration/tests/vanilla_transient_cluster.py   sahara(Download)
            raise_failure = True
            while timeutils.delta_seconds(
                    s_time, timeutils.utcnow()) < timeout:
                try:
                    self.sahara.clusters.get(cluster_info['cluster_id'])

src/s/a/sahara-HEAD/sahara/tests/integration/tests/vanilla_transient_cluster.py   sahara(Download)
            # wait for cluster deleting
            while timeutils.delta_seconds(
                    s_time, timeutils.utcnow()) < timeout:
                try:
                    self.sahara.clusters.get(cluster_info['cluster_id'])