Did I find the right examples for you? yes no      Crawl my project      Python Jobs

All Samples(175)  |  Call(165)  |  Derive(0)  |  Import(10)
Return the current time in seconds with high precision (unix version, use Manager.time() to stay platform independent).

        def unixTime():
    """Return the current time in seconds with high precision (unix version, use Manager.time() to stay platform independent)."""
    return systime.time()
        


src/l/o/Look-MLKademlia-HEAD/pymdht/DEPRECATED_server_dht.py   Look-MLKademlia(Download)
def _on_peers_found(peers):
    # Lookup callback: prints progress for a DHT get_peers lookup.
    # `start_ts` is presumably a module-level global set when the lookup
    # started (the surrounding snippet shows `global start_ts` being
    # assigned `time.time()`) — TODO confirm against the full file.
    # NOTE(review): Python 2 print-statement syntax; this file predates
    # Python 3.
    if peers:
        # A batch of peers arrived: report elapsed seconds and batch size.
        print '[%.4f] %d peer(s)' % (time.time() - start_ts, len(peers))
    else:
        # A falsy/empty argument signals the end of the lookup.
        print '[%.4f] END OF LOOKUP' % (time.time() - start_ts)
            dht.get_peers(info_hash, _on_peers_found)
            global start_ts
            start_ts = time.time()
            time.sleep(options.lookup_interval)
 

src/t/g/tgs-android-HEAD/res/raw/routing_nice_rtt.py   tgs-android(Download)
            if m_bucket:
                rnode = m_bucket.get_stalest_rnode()
                if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                    result = rnode
            if self._next_stale_maintenance_index == starting_index:
        # The main bucket is full
        # Let's see whether this node's latency is good
        current_time = time.time()
        rnode_to_be_replaced = None
        m_bucket.rnodes.sort(key=attrgetter('rtt'), reverse=True)
 
        """
        current_time = time.time()
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout += 1
        """
        rnode.rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:

src/s/w/swiftarm-HEAD/tribler/Tribler/Core/DecentralizedTracking/pymdht/core/controller.py   swiftarm(Download)
            self._my_node.id, self.msg_f) 
 
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
    def _get_cached_peers(self, info_hash):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        for ts, cached_info_hash, peers in self._cached_lookups:
            if ts > oldest_valid_ts and info_hash == cached_info_hash:
                return peers
 
    def _add_cache_peers(self, info_hash, peers):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
            self._cached_lookups[-1][2].extend(peers)
        else:
            self._cached_lookups.append((time.time(), info_hash, peers))
 
    def _try_do_lookup(self):
            queries_to_send = lookup_obj.start(bootstrap_rnodes)
        else:
            next_lookup_attempt_ts = time.time() + .2
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               next_lookup_attempt_ts)

src/p/y/pymdht-HEAD/core/controller.py   pymdht(Download)
            self._my_node.id, self.msg_f) 
 
        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
    def _get_cached_peers(self, info_hash):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        for ts, cached_info_hash, peers in self._cached_lookups:
            if ts > oldest_valid_ts and info_hash == cached_info_hash:
                return peers
 
    def _add_cache_peers(self, info_hash, peers):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
            self._cached_lookups[-1][2].extend(peers)
        else:
            self._cached_lookups.append((time.time(), info_hash, peers))
 
    def print_routing_table_stats(self):
 
        queries_to_send = []
        current_ts = time.time()
        #TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call

src/l/o/Look-MLKademlia-HEAD/pymdht/core/minitwisted.py   Look-MLKademlia(Download)
        self.args = args
        self.kwds = kwds
        self.call_time = time.time() + self.delay
        self._cancelled = False
 
 
        """
        current_time = time.time()
        if self.next_task is None:
            # no pending tasks
    def run(self):
        """Main loop activated by calling self.start()"""
 
        last_task_run = time.time()
        stop_flag = self.stop_flag
 
            if timeout_raised or \
                   time.time() - last_task_run > self.task_interval:
                #with self._lock:
                self._lock.acquire()

src/t/g/tgs-android-HEAD/res/raw/controller.py   tgs-android(Download)
            self._my_node.id, self.msg_f) 
 
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
    def _get_cached_peers(self, info_hash):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        for ts, cached_info_hash, peers in self._cached_lookups:
            if ts > oldest_valid_ts and info_hash == cached_info_hash:
                return peers
 
    def _add_cache_peers(self, info_hash, peers):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
            self._cached_lookups[-1][2].extend(peers)
        else:
            self._cached_lookups.append((time.time(), info_hash, peers))
 
    def _try_do_lookup(self):
            queries_to_send = lookup_obj.start(bootstrap_rnodes)
        else:
            next_lookup_attempt_ts = time.time() + .2
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               next_lookup_attempt_ts)

src/s/w/swiftarm-HEAD/tribler/Tribler/Core/DecentralizedTracking/pymdht/core/minitwisted.py   swiftarm(Download)
 
        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
                'Got socket.error when receiving data:\n%s' % e)
        else:
            self._add_capture((time.time(), addr, False, data))
            ip_is_blocked = self.floodbarrier_active and \
                            self.floodbarrier.ip_blocked(addr[0])
            logging.error('data,addr: %s %s' % (datagram.data, datagram.addr))
            raise
        self._add_capture((time.time(), datagram.addr, True, datagram.data))
 

src/s/w/swiftarm-HEAD/tribler/Tribler/Core/DecentralizedTracking/pymdht/core/cache.py   swiftarm(Download)
    def __init__(self, info_hash):
        self.info_hash = info_hash
        self.start_ts = time.time()
        self.peers = set()
 
    def put_cached_lookup(self, cached_lookup):
        # first remove expired cached lookups
        for i in range(len(self.cached_lookups)):
            if time.time() > (self.cached_lookups[i].start_ts +
                              self.validity_time):
    def get_cached_lookup(self, info_hash):
        for cached_lookup in self.cached_lookups:
            if cached_lookup.info_hash == info_hash:
                if time.time() < cached_lookup.start_ts + self.validity_time:
                    return cached_lookup.peers, CACHING_NODE

src/p/y/pymdht-HEAD/core/minitwisted.py   pymdht(Download)
 
        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
                'Got socket.error when receiving data:\n%s' % e)
        else:
            self._add_capture((time.time(), addr, False, data))
            ip_is_blocked = self.floodbarrier_active and \
                            self.floodbarrier.ip_blocked(addr[0])
            logging.error('data,addr: %s %s' % (datagram.data, datagram.addr))
            raise
        self._add_capture((time.time(), datagram.addr, True, datagram.data))
 

src/p/y/pymdht-HEAD/core/cache.py   pymdht(Download)
    def __init__(self, info_hash):
        self.info_hash = info_hash
        self.start_ts = time.time()
        self.peers = set()
 
    def put_cached_lookup(self, cached_lookup):
        # first remove expired cached lookups
        for i in range(len(self.cached_lookups)):
            if time.time() > (self.cached_lookups[i].start_ts +
                              self.validity_time):
    def get_cached_lookup(self, info_hash):
        for cached_lookup in self.cached_lookups:
            if cached_lookup.info_hash == info_hash:
                if time.time() < cached_lookup.start_ts + self.validity_time:
                    return cached_lookup.peers, CACHING_NODE

  1 | 2 | 3 | 4 | 5 | 6 | 7  Next