Changeset 1f1d089


Timestamp: Sep 4, 2009 7:58:15 PM
Author: zzz <zzz@…>
Branches: master
Children: 1850e89, 2341793, b2d6b60
Parents: 5372a50
Message: Move FloodSearchJob to its own file, history for two props, -19

Files: 1 added, 3 edited

  • history.txt

    r5372a50 r1f1d089

    +2009-09-04 zzz
    +    * SessionKeyManager, OCMOSJ, Garlic:
    +      - Enable per-client SessionKeyManagers for better anonymity
    +      - tagsDelivered() now means tags are sent, not acked.
    +      - OCMOSJ uses the new TagSetHandle object returned from tagsDelivered()
    +        to call tagsAcked() or failTags() as appropriate.
    +      - Assume tags delivered on an established session to
    +        reduce streaming lib stalls caused by massive tag deliveries;
    +        should increase throughput and window sizes on long-lived streams
    +      - Unacked tagsets on a new session are stored on a separate list
    +      - Don't kill an OB Session just because it's temporarily out of tags
    +      - Increase min tag threshold to 30 (was 20) due to new speculative
    +        tags delivered scheme, and to increase effective max window
    +      - More Java 5 and dead code cleanups, and more comments and javadoc,
    +        debug logging cleanups
    +      - Key toString()s for easier debugging
    +      - HandleGarlicMessageJob: cleanup of unused things
    +    * Tunnel TestJob:
    +      - Consume the tag after a failed test so it doesn't
    +        stay in the SKM
    +      - Disable tests with router.disableTunnelTesting=true
    +    * configkeyring.jsp: Add delete and cancel buttons
    +    * Logging: Fix directory for rotated log
    +    * TunnelDispatcher: Cleanup
    +
     2009-09-02 sponge
         * Small logic fix for dr|z3d
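
The first 2009-09-04 item above describes a two-step tag lifecycle: tagsDelivered() now only records that a tag set was sent, and OCMOSJ later resolves it through the returned TagSetHandle by calling tagsAcked() or failTags(). The sketch below is a self-contained toy model of that flow for readers of the changelog; every class and method in it is a hypothetical stand-in that merely mirrors the names used in the entry, not the real I2P SessionKeyManager API.

    // Toy model of the delivered -> acked/failed tag lifecycle described in the
    // 2009-09-04 history.txt entry above. This is NOT the real I2P API; all types
    // here are hypothetical stand-ins used only to illustrate the flow.
    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class TagLifecycleSketch {
        /** Stand-in for a delivered-but-not-yet-acked tag set. */
        static class TagSetHandle {
            final Set<String> tags;
            TagSetHandle(Set<String> tags) { this.tags = tags; }
        }

        /** Stand-in for a per-client session key manager. */
        static class SessionKeyManagerSketch {
            private final List<TagSetHandle> unacked = new ArrayList<TagSetHandle>(); // sent, not yet acked
            private final Set<String> usable = new HashSet<String>();                 // confirmed by the peer

            /** "tagsDelivered() now means tags are sent, not acked." */
            TagSetHandle tagsDelivered(Set<String> tags) {
                TagSetHandle h = new TagSetHandle(tags);
                unacked.add(h);
                return h;
            }

            /** Called once delivery of the tags is confirmed. */
            void tagsAcked(TagSetHandle h) {
                if (unacked.remove(h))
                    usable.addAll(h.tags);
            }

            /** Called when delivery fails; the speculative tags are discarded. */
            void failTags(TagSetHandle h) {
                unacked.remove(h);
            }
        }

        public static void main(String[] args) {
            SessionKeyManagerSketch skm = new SessionKeyManagerSketch();
            Set<String> sent = new HashSet<String>();
            sent.add("tag1");
            sent.add("tag2");
            TagSetHandle handle = skm.tagsDelivered(sent);  // tags sent, not yet acked
            boolean deliveryConfirmed = true;               // in OCMOSJ this would come from the send result
            if (deliveryConfirmed)
                skm.tagsAcked(handle);                      // unacked tag set becomes usable
            else
                skm.failTags(handle);                       // speculative tags are discarded
            System.out.println("usable tags: " + skm.usable);
        }
    }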
  • router/java/src/net/i2p/router/RouterVersion.java

    r5372a50 r1f1d089

         public final static String ID = "Monotone";
         public final static String VERSION = CoreVersion.VERSION;
    -    public final static long BUILD = 18;
    +    public final static long BUILD = 19;
         /** for example "-test" */
         public final static String EXTRA = "";
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java

    r5372a50 r1f1d089

         }
     }
    -
    -/**
    - * Try sending a search to some floodfill peers, but if we don't get a successful
    - * match within half the allowed lookup time, give up and start querying through
    - * the normal (kademlia) channels.  This should cut down on spurious lookups caused
    - * by simple delays in responses from floodfill peers
    - *
    - */
    -class FloodSearchJob extends JobImpl {
    -    private Log _log;
    -    private FloodfillNetworkDatabaseFacade _facade;
    -    private Hash _key;
    -    private final List _onFind;
    -    private final List _onFailed;
    -    private long _expiration;
    -    private int _timeoutMs;
    -    private long _origExpiration;
    -    private boolean _isLease;
    -    private volatile int _lookupsRemaining;
    -    private volatile boolean _dead;
    -    public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
    -        super(ctx);
    -        _log = ctx.logManager().getLog(FloodSearchJob.class);
    -        _facade = facade;
    -        _key = key;
    -        _onFind = new ArrayList();
    -        _onFind.add(onFind);
    -        _onFailed = new ArrayList();
    -        _onFailed.add(onFailed);
    -        int timeout = -1;
    -        timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
    -        if (timeout < timeoutMs)
    -            timeout = timeoutMs;
    -        _timeoutMs = timeout;
    -        _expiration = timeout + ctx.clock().now();
    -        _origExpiration = timeoutMs + ctx.clock().now();
    -        _isLease = isLease;
    -        _lookupsRemaining = 0;
    -        _dead = false;
    -    }
    -    void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
    -        if (_dead) {
    -            getContext().jobQueue().addJob(onFailed);
    -        } else {
    -            if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
    -            if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
    -        }
    -    }
    -    public long getExpiration() { return _expiration; }
    -    private static final int CONCURRENT_SEARCHES = 2;
    -    private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
    -    private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
    -    public void runJob() {
    -        // pick some floodfill peers and send out the searches
    -        List floodfillPeers = _facade.getFloodfillPeers();
    -        FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
    -        ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
    -        Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
    -        OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);
    -
    -        for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
    -            Hash peer = (Hash)floodfillPeers.get(i);
    -            if (peer.equals(getContext().routerHash()))
    -                continue;
    -
    -            DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
    -            TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
    -            TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
    -            if ( (replyTunnel == null) || (outTunnel == null) ) {
    -                _dead = true;
    -                List removed = null;
    -                synchronized (_onFailed) {
    -                    removed = new ArrayList(_onFailed);
    -                    _onFailed.clear();
    -                }
    -                while (removed.size() > 0)
    -                    getContext().jobQueue().addJob((Job)removed.remove(0));
    -                getContext().messageRegistry().unregisterPending(out);
    -                return;
    -            }
    -            dlm.setFrom(replyTunnel.getPeer(0));
    -            dlm.setMessageExpiration(getContext().clock().now()+10*1000);
    -            dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
    -            dlm.setSearchKey(_key);
    -
    -            if (_log.shouldLog(Log.INFO))
    -                _log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
    -            getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
    -            _lookupsRemaining++;
    -        }
    -
    -        if (_lookupsRemaining <= 0) {
    -            if (_log.shouldLog(Log.INFO))
    -                _log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
    -            // no floodfill peers, go to the normal ones
    -            getContext().messageRegistry().unregisterPending(out);
    -            _facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
    -        }
    -    }
    -    public String getName() { return "NetDb search (phase 1)"; }
    -
    -    Hash getKey() { return _key; }
    -    void decrementRemaining() { _lookupsRemaining--; }
    -    int getLookupsRemaining() { return _lookupsRemaining; }
    -
    -    void failed() {
    -        if (_dead) return;
    -        _dead = true;
    -        int timeRemaining = (int)(_origExpiration - getContext().clock().now());
    -        if (_log.shouldLog(Log.INFO))
    -            _log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
    -        if (timeRemaining > 0) {
    -            _facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
    -        } else {
    -            List removed = null;
    -            synchronized (_onFailed) {
    -                removed = new ArrayList(_onFailed);
    -                _onFailed.clear();
    -            }
    -            while (removed.size() > 0)
    -                getContext().jobQueue().addJob((Job)removed.remove(0));
    -        }
    -    }
    -    void success() {
    -        if (_dead) return;
    -        if (_log.shouldLog(Log.INFO))
    -            _log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
    -        _dead = true;
    -        _facade.complete(_key);
    -        List removed = null;
    -        synchronized (_onFind) {
    -            removed = new ArrayList(_onFind);
    -            _onFind.clear();
    -        }
    -        while (removed.size() > 0)
    -            getContext().jobQueue().addJob((Job)removed.remove(0));
    -    }
    -}
    -
    -class FloodLookupTimeoutJob extends JobImpl {
    -    private FloodSearchJob _search;
    -    public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
    -        super(ctx);
    -        _search = job;
    -    }
    -    public void runJob() {
    -        _search.decrementRemaining();
    -        if (_search.getLookupsRemaining() <= 0)
    -            _search.failed();
    -    }
    -    public String getName() { return "NetDb search (phase 1) timeout"; }
    -}
    -
    -class FloodLookupMatchJob extends JobImpl implements ReplyJob {
    -    private Log _log;
    -    private FloodSearchJob _search;
    -    public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
    -        super(ctx);
    -        _log = ctx.logManager().getLog(FloodLookupMatchJob.class);
    -        _search = job;
    -    }
    -    public void runJob() {
    -        if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
    -             (getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
    -            _search.success();
    -        } else {
    -            int remaining = _search.getLookupsRemaining();
    -            if (_log.shouldLog(Log.INFO))
    -                _log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
    -                          + _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
    -            // netDb reply pointing us at other people
    -            if (remaining <= 0)
    -                _search.failed();
    -        }
    -    }
    -    public String getName() { return "NetDb search (phase 1) match"; }
    -    public void setMessage(I2NPMessage message) {}
    -}
    -
    -class FloodLookupSelector implements MessageSelector {
    -    private RouterContext _context;
    -    private FloodSearchJob _search;
    -    public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
    -        _context = ctx;
    -        _search = search;
    -    }
    -    public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
    -    public long getExpiration() { return _search.getExpiration(); }
    -    public boolean isMatch(I2NPMessage message) {
    -        if (message == null) return false;
    -        if (message instanceof DatabaseStoreMessage) {
    -            DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
    -            // is it worth making sure the reply came in on the right tunnel?
    -            if (_search.getKey().equals(dsm.getKey())) {
    -                _search.decrementRemaining();
    -                return true;
    -            }
    -        } else if (message instanceof DatabaseSearchReplyMessage) {
    -            DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
    -            if (_search.getKey().equals(dsrm.getSearchKey())) {
    -                _search.decrementRemaining();
    -                return true;
    -            }
    -        }
    -        return false;
    -    }
    -}
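
The added file itself (the "1 added" entry under Files) is not shown in this changeset. Since FloodfillNetworkDatabaseFacade instantiates the package-private FloodSearchJob, the moved class presumably stays in the same package; the header below is only a sketch inferred from the removed block above, and the exact import list of the real file is an assumption.

    // Hypothetical header for the new standalone FloodSearchJob.java -- the actual
    // added file is not part of this diff, so the package is taken from the directory
    // of the edited facade and the imports are inferred from the code removed above.
    package net.i2p.router.networkdb.kademlia;

    import java.util.ArrayList;
    import java.util.List;

    import net.i2p.data.Hash;
    import net.i2p.data.i2np.DatabaseLookupMessage;
    import net.i2p.data.i2np.DatabaseSearchReplyMessage;
    import net.i2p.data.i2np.DatabaseStoreMessage;
    import net.i2p.data.i2np.I2NPMessage;
    import net.i2p.router.Job;
    import net.i2p.router.JobImpl;
    import net.i2p.router.MessageSelector;
    import net.i2p.router.OutNetMessage;
    import net.i2p.router.ReplyJob;
    import net.i2p.router.RouterContext;
    import net.i2p.router.TunnelInfo;
    import net.i2p.util.Log;

    class FloodSearchJob extends JobImpl {
        // ... body unchanged from the block removed above ...
    }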