Changeset dcc1861


Timestamp: Jul 29, 2011 8:43:52 PM (10 years ago)
Author: zzz <zzz@…>
Branches: master
Children: 7180417
Parents: e7f4494
Message:
  • Netdb Search:
    • Follow all DSRM's, not just the last one, by moving the code from the match job to the selector
    • Update peer profile after SingleSearchJob
    • Cleanups, javadocs, log tweaks, final
Files: 11 edited

  • history.txt

    @@ -1,2 +1,10 @@
    +2011-07-29 zzz
    +  * Netdb Search:
    +    - Follow all DSRM's, not just the last one, by moving the code
    +      from the match job to the selector
    +    - Update peer profile after SingleSearchJob
    +    - Cleanups, javadocs, log tweaks, final
    +  * ProfileOrganizer: Tweak fast tier size
    +
     2011-07-28 zzz
       * Context: Split up big lock to avoid deadlocks
  • router/java/src/net/i2p/router/RouterVersion.java

    @@ -19,5 +19,5 @@
         public final static String ID = "Monotone";
         public final static String VERSION = CoreVersion.VERSION;
    -    public final static long BUILD = 16;
    +    public final static long BUILD = 17;

         /** for example "-test" */
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupMatchJob.java

    @@ -15,5 +15,4 @@
         private final Log _log;
         private final FloodOnlySearchJob _search;
    -    private DatabaseSearchReplyMessage _dsrm;

         public FloodOnlyLookupMatchJob(RouterContext ctx, FloodOnlySearchJob job) {
    @@ -29,17 +28,6 @@
                 _search.success();
             } else {
    -            int remaining = _search.getLookupsRemaining();
    -            if (_log.shouldLog(Log.INFO))
    -                _log.info(_search.getJobId() + ": got a DatabaseSearchReply when we were looking for "
    -                          + _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
    -            // netDb reply pointing us at other people
    -            // Only process if we don't know enough floodfills
    -            // This only works if both reply, otherwise we aren't called - should be fixed
    -            if (_search.shouldProcessDSRM() && _dsrm != null) {
    -                if (_log.shouldLog(Log.INFO))
    -                    _log.info(_search.getJobId() + ": Processing DatabaseSearchReply");
    -                // Chase the hashes from the reply
    -                getContext().jobQueue().addJob(new SingleLookupJob(getContext(), _dsrm));
    -            }
    +            // In practice, we always have zero remaining when this is called,
    +            // because the selector only returns true when there is zero remaining
                 _search.failed();
             }
    @@ -50,9 +38,7 @@
         public void setMessage(I2NPMessage message) {
             if (message instanceof DatabaseSearchReplyMessage) {
    +            // DSRM processing now in FloodOnlyLookupSelector instead of here,
                 // a dsrm is only passed in when there are no more lookups remaining
    -            // If more than one peer sent one, we only process the last one
    -            // And sadly if the first peer sends a DRSM and the second one times out,
    -            // this won't get called...
    -            _dsrm = (DatabaseSearchReplyMessage) message;
    +            // so that all DSRM's are processed, not just the last one.
                 _search.failed();
                 return;
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupSelector.java

    @@ -39,12 +39,30 @@
                 DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
                 if (_search.getKey().equals(dsrm.getSearchKey())) {
    -                _search.decrementRemaining(dsrm.getFromHash());
    -                // assume 0 old, all new, 0 invalid, 0 dup
    +
    +                // TODO - dsrm.getFromHash() can't be trusted - check against the list of
    +                // those we sent the search to in _search ?
    +
    +                // assume 0 new, all old, 0 invalid, 0 dup
                     _context.profileManager().dbLookupReply(dsrm.getFromHash(),  0, dsrm.getNumReplies(), 0, 0,
                                                             System.currentTimeMillis()-_search.getCreated());
    -                if (_search.getLookupsRemaining() <= 0)
    -                    return true; // ok, no more left, so time to fail
    -                else
    -                    return false;
    +
    +                // Moved from FloodOnlyLookupMatchJob so it is called for all replies
    +                // rather than just the last one
    +                // Got a netDb reply pointing us at other floodfills...
    +                // Only process if we don't know enough floodfills or are starting up
    +                if (_search.shouldProcessDSRM()) {
    +                    if (_log.shouldLog(Log.INFO))
    +                        _log.info(_search.getJobId() + ": Processing DSRM via SingleLookupJob, apparently from " + dsrm.getFromHash());
    +                    // Chase the hashes from the reply
    +                    _context.jobQueue().addJob(new SingleLookupJob(_context, dsrm));
    +                } else if (_log.shouldLog(Log.INFO)) {
    +                    int remaining = _search.getLookupsRemaining();
    +                    _log.info(_search.getJobId() + ": got a DSRM apparently from " + dsrm.getFromHash() + " when we were looking for "
    +                              + _search.getKey() + ", with " + remaining + " outstanding searches");
    +                }
    +
    +                // if no more left, time to fail
    +                int remaining = _search.decrementRemaining(dsrm.getFromHash());
    +                return remaining <= 0;
                 }
             }
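
To summarize the control-flow change above: the selector now follows every DatabaseSearchReplyMessage as it arrives, and only reports the search as finished once no outstanding lookups remain, instead of stashing a single DSRM for the match job to chase later. A minimal, self-contained sketch of that accounting is below; the class and method names (SearchState, ReplySelector, onReply) are simplified stand-ins for illustration, not the actual I2P classes shown in the diff.

    // Simplified stand-ins for FloodOnlySearchJob / FloodOnlyLookupSelector;
    // illustration only, not code from this changeset.
    final class SearchState {
        private int lookupsRemaining;

        SearchState(int concurrentSearches) {
            lookupsRemaining = concurrentSearches;
        }

        /** @return number remaining after decrementing */
        synchronized int decrementRemaining() {
            if (lookupsRemaining > 0)
                lookupsRemaining--;
            return lookupsRemaining;
        }
    }

    final class ReplySelector {
        private final SearchState search;

        ReplySelector(SearchState search) {
            this.search = search;
        }

        /**
         * Called once per DSRM-style reply.
         * @param chaseReferencedFloodfills queues a lookup of the floodfills named in the reply
         * @return true when the whole search should be treated as finished (failed)
         */
        boolean onReply(Runnable chaseReferencedFloodfills) {
            // Old behavior: only the last reply was chased, from the match job.
            // New behavior: chase every reply as it arrives...
            chaseReferencedFloodfills.run();
            // ...and declare the search over only when no lookups are outstanding.
            return search.decrementRemaining() <= 0;
        }
    }
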
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlyLookupTimeoutJob.java

    @@ -7,15 +7,14 @@
     class FloodOnlyLookupTimeoutJob extends JobImpl {
         private final FloodSearchJob _search;
    -    private final Log _log;

         public FloodOnlyLookupTimeoutJob(RouterContext ctx, FloodOnlySearchJob job) {
             super(ctx);
             _search = job;
    -        _log = ctx.logManager().getLog(getClass());
         }

         public void runJob() {
    -        if (_log.shouldLog(Log.INFO))
    -            _log.info(_search.getJobId() + ": search timed out");
    +        Log log = getContext().logManager().getLog(getClass());
    +        if (log.shouldLog(Log.INFO))
    +            log.info(_search.getJobId() + ": search timed out");
             _search.failed();
         }
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java

    @@ -39,22 +39,22 @@
     class FloodOnlySearchJob extends FloodSearchJob {
         private volatile boolean _dead;
    -    private final long _created;
    +    protected final long _created;
         private boolean _shouldProcessDSRM;
         private final HashSet<Hash> _unheardFrom;

    -    private final List<OutNetMessage> _out;
    +    /** this is a marker to register with the MessageRegistry, it is never sent */
    +    private OutNetMessage _out;
         protected final MessageSelector _replySelector;
         protected final ReplyJob _onReply;
         protected final Job _onTimeout;

    +    private static final int MIN_FOR_NO_DSRM = 4;
    +
         public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
             super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
             // these override the settings in super
    -        _log = ctx.logManager().getLog(FloodOnlySearchJob.class);
             _timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
             _expiration = _timeoutMs + ctx.clock().now();
             _origExpiration = _timeoutMs + ctx.clock().now();
    -        // do we need a synchronizedList, since we synch on _out everywhere below...
    -        _out = Collections.synchronizedList(new ArrayList(2));
             _unheardFrom = new HashSet(CONCURRENT_SEARCHES);
             _replySelector = new FloodOnlyLookupSelector(getContext(), this);
    @@ -64,8 +64,8 @@
         }

    +    /** System time, NOT context time */
         public long getCreated() { return _created; }
    +
         public boolean shouldProcessDSRM() { return _shouldProcessDSRM; }
    -    private static final int CONCURRENT_SEARCHES = 2;
    -    private static final int MIN_FOR_NO_DSRM = 4;

         @Override
    @@ -106,6 +106,8 @@
                 Collections.shuffle(floodfillPeers, getContext().random());
             }
    -        OutNetMessage out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
    -        synchronized (_out) { _out.add(out); }
    +
    +        // This OutNetMessage is never used or sent (setMessage() is never called), it's only
    +        // so we can register a reply selector.
    +        _out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);

     /********
    @@ -195,9 +197,13 @@
         public String getName() { return "NetDb flood search (phase 1)"; }

    -    /** Note that we heard from the peer */
    -    void decrementRemaining(Hash peer) {
    -        decrementRemaining();
    +    /**
    +     *  Note that we heard from the peer
    +     *
    +     *  @return number remaining after decrementing
    +     */
    +    int decrementRemaining(Hash peer) {
             synchronized(_unheardFrom) {
                 _unheardFrom.remove(peer);
    +            return decrementRemaining();
             }
         }
    @@ -209,10 +215,5 @@
                 _dead = true;
             }
    -        List outBuf = null;
    -        synchronized (_out) { outBuf = new ArrayList(_out); }
    -        for (int i = 0; i < outBuf.size(); i++) {
    -            OutNetMessage out = (OutNetMessage)outBuf.get(i);
    -            getContext().messageRegistry().unregisterPending(out);
    -        }
    +        getContext().messageRegistry().unregisterPending(_out);
             int timeRemaining = (int)(_origExpiration - getContext().clock().now());
             if (_log.shouldLog(Log.INFO))
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodSearchJob.java

    @@ -30,5 +30,5 @@
      */
     public class FloodSearchJob extends JobImpl {
    -    protected Log _log;
    +    protected final Log _log;
         protected final FloodfillNetworkDatabaseFacade _facade;
         protected final Hash _key;
    @@ -44,13 +44,12 @@
         public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
             super(ctx);
    -        _log = ctx.logManager().getLog(FloodSearchJob.class);
    +        _log = ctx.logManager().getLog(getClass());
             _facade = facade;
             _key = key;
    -        _onFind = new ArrayList();
    +        _onFind = new ArrayList(4);
             _onFind.add(onFind);
    -        _onFailed = new ArrayList();
    +        _onFailed = new ArrayList(4);
             _onFailed.add(onFailed);
    -        int timeout = -1;
    -        timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
    +        int timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
             if (timeout < timeoutMs)
                 timeout = timeoutMs;
    @@ -59,7 +58,6 @@
             _origExpiration = timeoutMs + ctx.clock().now();
             _isLease = isLease;
    -        _lookupsRemaining = 0;
    -        _dead = false;
    -    }
    +    }
    +
         void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
             if (_dead) {
    @@ -70,8 +68,12 @@
             }
         }
    +
    +    /** using context clock */
         public long getExpiration() { return _expiration; }
    -    private static final int CONCURRENT_SEARCHES = 2;
    +
    +    protected static final int CONCURRENT_SEARCHES = 2;
         private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
         private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
    +
         public void runJob() {
             // pick some floodfill peers and send out the searches
    @@ -121,8 +123,19 @@
             }
         }
    +
         public String getName() { return "NetDb search (phase 1)"; }

         protected Hash getKey() { return _key; }
    -    protected void decrementRemaining() { if (_lookupsRemaining > 0) _lookupsRemaining--; }
    +
    +    /**
    +     *  TODO AtomicInteger?
    +     *  @return number remaining after decrementing
    +     */
    +    protected int decrementRemaining() {
    +        if (_lookupsRemaining > 0)
    +            return (--_lookupsRemaining);
    +        return 0;
    +    }
    +
         protected int getLookupsRemaining() { return _lookupsRemaining; }

    @@ -145,4 +158,5 @@
             }
         }
    +
         void success() {
             if (_dead) return;
    @@ -167,6 +181,6 @@
             }
             public void runJob() {
    -            _search.decrementRemaining();
    -            if (_search.getLookupsRemaining() <= 0)
    +            int remaining = _search.decrementRemaining();
    +            if (remaining <= 0)
                     _search.failed();
             }
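
Regarding the "TODO AtomicInteger?" note in the new decrementRemaining() above: a possible lock-free variant using java.util.concurrent.atomic.AtomicInteger is sketched below. This is only an illustration of that TODO, under the assumption that the counter must never go below zero, and is not part of the changeset.

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical follow-up to the "TODO AtomicInteger?" comment; not in this changeset.
    class LookupCounter {
        private final AtomicInteger _lookupsRemaining = new AtomicInteger();

        void incrementRemaining() {
            _lookupsRemaining.incrementAndGet();
        }

        /** @return number remaining after decrementing, clamped at zero like the existing code */
        int decrementRemaining() {
            while (true) {
                int current = _lookupsRemaining.get();
                if (current <= 0)
                    return 0;
                if (_lookupsRemaining.compareAndSet(current, current - 1))
                    return current - 1;
            }
        }

        int getLookupsRemaining() {
            return _lookupsRemaining.get();
        }
    }

On newer Java, AtomicInteger.updateAndGet() would shorten the loop; the explicit compare-and-set loop above avoids assuming anything newer than the Java versions in use when this changeset was made.
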
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java

    @@ -34,5 +34,5 @@
     public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         public static final char CAPABILITY_FLOODFILL = 'f';
    -    private final Map _activeFloodQueries;
    +    private final Map<Hash, FloodSearchJob> _activeFloodQueries;
         private boolean _floodfillEnabled;
         /** for testing, see isFloodfill() below */
    @@ -251,5 +251,5 @@
             FloodSearchJob searchJob = null;
             synchronized (_activeFloodQueries) {
    -            searchJob = (FloodSearchJob)_activeFloodQueries.get(key);
    +            searchJob = _activeFloodQueries.get(key);
                 if (searchJob == null) {
                     //if (SearchJob.onlyQueryFloodfillPeers(_context)) {
    @@ -326,4 +326,5 @@
             }
         }
    +
         void complete(Hash key) {
             synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java

    @@ -44,4 +44,5 @@
          * List will not include our own hash.
          *
    +     * @param key the ROUTING key (NOT the original key)
          * @param peersToIgnore can be null
          * @return List of Hash for the peers selected
    @@ -58,4 +59,5 @@
          * List will not include our own hash.
          *
    +     * @param key the ROUTING key (NOT the original key)
          * @param peersToIgnore can be null
          * @return List of Hash for the peers selected
    @@ -71,4 +73,5 @@
          * List will not include our own hash.
          *
    +     * @param key the ROUTING key (NOT the original key)
          * @param peersToIgnore can be null
          * @return List of Hash for the peers selected
    @@ -134,5 +137,5 @@
          *
          *  @return floodfills closest to the key that are not shitlisted forever
    -     *  @param key the routing key
    +     *  @param key the ROUTING key (NOT the original key)
          *  @param maxNumRouters max to return
          *  Sorted by closest to the key if > maxNumRouters, otherwise not
    @@ -160,4 +163,5 @@
          *  See above for description
          *  List will not include our own hash
    +     *  @param key the ROUTING key (NOT the original key)
          *  @param toIgnore can be null
          */
    @@ -176,4 +180,5 @@
          *  See above for description
          *  List MAY CONTAIN our own hash unless included in toIgnore
    +     *  @param key the ROUTING key (NOT the original key)
          *  @param toIgnore can be null
          */
    @@ -272,4 +277,5 @@
            /**
             *  Warning - may return our router hash - add to toIgnore if necessary
    +        *  @param key the ROUTING key (NOT the original key)
             *  @param toIgnore can be null
             */
  • router/java/src/net/i2p/router/networkdb/kademlia/SingleLookupJob.java

    @@ -6,5 +6,5 @@
     import net.i2p.router.JobImpl;
     import net.i2p.router.RouterContext;
    -import net.i2p.util.Log;
    +//import net.i2p.util.Log;

     /**
    @@ -21,10 +21,10 @@
      */
     class SingleLookupJob extends JobImpl {
    -    private final Log _log;
    +    //private final Log _log;
         private final DatabaseSearchReplyMessage _dsrm;

         public SingleLookupJob(RouterContext ctx, DatabaseSearchReplyMessage dsrm) {
             super(ctx);
    -        _log = ctx.logManager().getLog(getClass());
    +        //_log = ctx.logManager().getLog(getClass());
             _dsrm = dsrm;
         }
  • router/java/src/net/i2p/router/networkdb/kademlia/SingleSearchJob.java

    @@ -57,7 +57,10 @@
         void failed() {
             getContext().messageRegistry().unregisterPending(_onm);
    +        getContext().profileManager().dbLookupFailed(_to);
         }

         @Override
    -    void success() {}
    +    void success() {
    +        getContext().profileManager().dbLookupSuccessful(_to, System.currentTimeMillis()-_created);
    +    }
     }