Changeset b045fb3a

Timestamp:
Nov 9, 2009 5:23:22 PM
Author:
zzz <zzz@…>
Branches:
master
Children:
2dc37981
Parents:
37a2ccc
Message:
  • FloodOnlySearchJob:
    • Fix up field hiding and duplicate overrides (see the sketch below)
    • Other javadoc and Java 5 improvements
Location:
router/java/src/net/i2p/router/networkdb/kademlia
Files:
4 edited
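
The headline fix is field hiding. FloodOnlySearchJob redeclared fields such as _log, _key, _onFind, and _onFailed that already exist in its superclass FloodSearchJob; in Java a subclass field never overrides a superclass field, it only hides it, so superclass code keeps operating on its own separate copy. A minimal sketch of the pitfall, using illustrative class names that are not from this changeset:

    // Field hiding: Base.describe() is compiled against Base._name and
    // never sees the field redeclared in Child.
    class Base {
        protected String _name = "base";
        String describe() { return _name; }   // always reads Base._name
    }

    class Child extends Base {
        protected String _name = "child";     // hides Base._name rather than replacing it

        public static void main(String[] args) {
            Child c = new Child();
            System.out.println(c.describe()); // prints "base" - Base methods see the hidden field
            System.out.println(c._name);      // prints "child"
        }
    }

The changeset takes the standard cure: the duplicate declarations in the subclass are deleted, and the FloodSearchJob fields are widened to protected so both classes share a single copy.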

Legend:

In the diffs below, removed lines are prefixed with -, added lines with +, unchanged context lines with a space, and … marks an elided unchanged region.
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java

    r37a2ccc → rb045fb3a

      */
     class FloodOnlySearchJob extends FloodSearchJob {
    -    protected Log _log;
    -    private FloodfillNetworkDatabaseFacade _facade;
    -    protected Hash _key;
    -    private final List _onFind;
    -    private final List _onFailed;
    -    private long _expiration;
    -    protected int _timeoutMs;
    -    private long _origExpiration;
    -    private boolean _isLease;
         protected volatile int _lookupsRemaining;
         private volatile boolean _dead;
         private long _created;
         private boolean _shouldProcessDSRM;
    -    private final HashSet _unheardFrom;
    +    private final HashSet<Hash> _unheardFrom;
     
    -    protected final List _out;
    +    private final List<OutNetMessage> _out;
         protected MessageSelector _replySelector;
         protected ReplyJob _onReply;
         protected Job _onTimeout;
    +
         public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
             super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
    +        // these override the settings in super
             _log = ctx.logManager().getLog(FloodOnlySearchJob.class);
    -        _facade = facade;
    -        _key = key;
    -        _onFind = new ArrayList();
    -        _onFind.add(onFind);
    -        _onFailed = new ArrayList();
    -        _onFailed.add(onFailed);
             _timeoutMs = Math.min(timeoutMs, SearchJob.PER_FLOODFILL_PEER_TIMEOUT);
             _expiration = _timeoutMs + ctx.clock().now();
             _origExpiration = _timeoutMs + ctx.clock().now();
    -        _isLease = isLease;
    -        _lookupsRemaining = 0;
    -        _dead = false;
    +        // do we need a synchronizedList, since we synch on _out everywhere below...
             _out = Collections.synchronizedList(new ArrayList(2));
             _unheardFrom = new HashSet(CONCURRENT_SEARCHES);
    …
             _shouldProcessDSRM = false;
         }
    -    @Override
    -    void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
    -        if (_dead) {
    -            getContext().jobQueue().addJob(onFailed);
    -        } else {
    -            if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
    -            if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
    -        }
    -    }
    -    @Override
    -    public long getExpiration() { return _expiration; }
    +
         public long getCreated() { return _created; }
         public boolean shouldProcessDSRM() { return _shouldProcessDSRM; }
    …
         public String getName() { return "NetDb flood search (phase 1)"; }
     
    -    @Override
    -    Hash getKey() { return _key; }
    -    @Override
    -    void decrementRemaining() { if (_lookupsRemaining > 0) _lookupsRemaining--; }
    -    @Override
    -    int getLookupsRemaining() { return _lookupsRemaining; }
         /** Note that we heard from the peer */
         void decrementRemaining(Hash peer) {
    …
                 _log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining + " remaining after " + (System.currentTimeMillis()-_created));
             synchronized(_unheardFrom) {
    -            for (Iterator iter = _unheardFrom.iterator(); iter.hasNext(); )
    -                getContext().profileManager().dbLookupFailed((Hash) iter.next());
    +            for (Iterator<Hash> iter = _unheardFrom.iterator(); iter.hasNext(); )
    +                getContext().profileManager().dbLookupFailed(iter.next());
             }
             _facade.complete(_key);
    …
             synchronized(_unheardFrom) {
                 if (_unheardFrom.size() == 1) {
    -                Hash peer = (Hash) _unheardFrom.iterator().next();
    +                Hash peer = _unheardFrom.iterator().next();
                     getContext().profileManager().dbLookupSuccessful(peer, System.currentTimeMillis()-_created);
                 }
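
Most of the remaining edits in this file are Java 5 generics: parameterizing a collection moves the element-type check to compile time and removes the explicit casts, as in the Iterator<Hash> loop above. A standalone before/after sketch using only standard library types:

    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.Set;

    public class GenericsDemo {
        public static void main(String[] args) {
            // Raw type (pre-Java 5 style): elements come back as Object and need a cast
            Set raw = new HashSet();
            raw.add("peer1");
            for (Iterator iter = raw.iterator(); iter.hasNext(); ) {
                String peer = (String) iter.next();  // unchecked; wrong types fail only at runtime
                System.out.println(peer);
            }

            // Parameterized type: the compiler enforces the element type, no cast needed
            Set<String> typed = new HashSet<String>();
            typed.add("peer1");
            for (Iterator<String> iter = typed.iterator(); iter.hasNext(); ) {
                System.out.println(iter.next());
            }
        }
    }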
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodSearchJob.java

    r37a2ccc → rb045fb3a

      * by simple delays in responses from floodfill peers
      *
    + * NOTE: Unused directly - see FloodOnlySearchJob extension which overrides almost everything.
    + * TODO: Comment out or delete what we don't use here.
      */
     public class FloodSearchJob extends JobImpl {
    -    private Log _log;
    -    private FloodfillNetworkDatabaseFacade _facade;
    -    private Hash _key;
    -    private final List _onFind;
    -    private final List _onFailed;
    -    private long _expiration;
    -    private int _timeoutMs;
    -    private long _origExpiration;
    -    private boolean _isLease;
    -    private volatile int _lookupsRemaining;
    -    private volatile boolean _dead;
    +    protected Log _log;
    +    protected FloodfillNetworkDatabaseFacade _facade;
    +    protected Hash _key;
    +    protected final List<Job> _onFind;
    +    protected final List<Job> _onFailed;
    +    protected long _expiration;
    +    protected int _timeoutMs;
    +    protected long _origExpiration;
    +    protected boolean _isLease;
    +    protected volatile int _lookupsRemaining;
    +    protected volatile boolean _dead;
    +
         public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
             super(ctx);
    …
                 if ( (replyTunnel == null) || (outTunnel == null) ) {
                     _dead = true;
    -                List removed = null;
    +                List<Job> removed = null;
                     synchronized (_onFailed) {
                         removed = new ArrayList(_onFailed);
    …
                     }
                     while (removed.size() > 0)
    -                    getContext().jobQueue().addJob((Job)removed.remove(0));
    +                    getContext().jobQueue().addJob(removed.remove(0));
                     getContext().messageRegistry().unregisterPending(out);
                     return;
    …
         public String getName() { return "NetDb search (phase 1)"; }
     
    -    Hash getKey() { return _key; }
    -    void decrementRemaining() { _lookupsRemaining--; }
    -    int getLookupsRemaining() { return _lookupsRemaining; }
    +    protected Hash getKey() { return _key; }
    +    protected void decrementRemaining() { if (_lookupsRemaining > 0) _lookupsRemaining--; }
    +    protected int getLookupsRemaining() { return _lookupsRemaining; }
     
         void failed() {
    …
                 _facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
             } else {
    -            List removed = null;
    +            List<Job> removed = null;
                 synchronized (_onFailed) {
                     removed = new ArrayList(_onFailed);
    …
                 }
                 while (removed.size() > 0)
    -                getContext().jobQueue().addJob((Job)removed.remove(0));
    +                getContext().jobQueue().addJob(removed.remove(0));
             }
         }
    …
             _dead = true;
             _facade.complete(_key);
    -        List removed = null;
    +        List<Job> removed = null;
             synchronized (_onFind) {
                 removed = new ArrayList(_onFind);
    …
             }
             while (removed.size() > 0)
    -            getContext().jobQueue().addJob((Job)removed.remove(0));
    +            getContext().jobQueue().addJob(removed.remove(0));
         }
     
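
One idiom in failed() and the success path above deserves a note: the callback list is copied while holding its lock, then drained and dispatched after the lock is released, so no job is enqueued while the monitor is held. A self-contained sketch of the same copy-then-drain pattern, with Runnable standing in for I2P's Job:

    import java.util.ArrayList;
    import java.util.List;

    public class CallbackDrain {
        private final List<Runnable> _onFailed = new ArrayList<Runnable>();

        public void add(Runnable r) {
            synchronized (_onFailed) { _onFailed.add(r); }
        }

        /** Copy under the lock, dispatch outside it, mirroring failed() above. */
        public void failed() {
            List<Runnable> removed;
            synchronized (_onFailed) {
                removed = new ArrayList<Runnable>(_onFailed);
                _onFailed.clear();
            }
            while (removed.size() > 0)
                removed.remove(0).run();  // callbacks run with no lock held
        }

        public static void main(String[] args) {
            CallbackDrain cd = new CallbackDrain();
            cd.add(new Runnable() { public void run() { System.out.println("failed callback"); } });
            cd.failed();
        }
    }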
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java

    r37a2ccc → rb045fb3a

         }
     
    -    public List getKnownRouterData() {
    -        List rv = new ArrayList();
    +    public List<RouterInfo> getKnownRouterData() {
    +        List<RouterInfo> rv = new ArrayList();
             DataStore ds = getDataStore();
             if (ds != null) {
    …
                         Object o = getDataStore().get((Hash)iter.next());
                         if (o instanceof RouterInfo)
    -                        rv.add(o);
    +                        rv.add((RouterInfo)o);
                     }
                 }
    …
          * wider kademlia-style searches
          */
    -    void searchFull(Hash key, List onFind, List onFailed, long timeoutMs, boolean isLease) {
    +    void searchFull(Hash key, List<Job> onFind, List<Job> onFailed, long timeoutMs, boolean isLease) {
             synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
     
    …
                 synchronized (onFind) {
                     if (onFind.size() > 0)
    -                    find = (Job)onFind.remove(0);
    +                    find = onFind.remove(0);
                 }
             }
    …
                 synchronized (onFailed) {
                     if (onFailed.size() > 0)
    -                    fail = (Job)onFailed.remove(0);
    +                    fail = onFailed.remove(0);
                 }
             }
    …
                               + job.getJobId() + ") with " + timeoutMs + " remaining");
                 long expiration = timeoutMs + _context.clock().now();
    -            List removed = null;
    +            List<Job> removed = null;
                 if (onFind != null) {
                     synchronized (onFind) {
    …
                     }
                     for (int i = 0; i < removed.size(); i++)
    -                    job.addDeferred((Job)removed.get(i), null, expiration, isLease);
    +                    job.addDeferred(removed.get(i), null, expiration, isLease);
                     removed = null;
                 }
    …
                     }
                     for (int i = 0; i < removed.size(); i++)
    -                    job.addDeferred(null, (Job)removed.get(i), expiration, isLease);
    +                    job.addDeferred(null, removed.get(i), expiration, isLease);
                     removed = null;
                 }
    …
         /** list of the Hashes of currently known floodfill peers;
           * Returned list will not include our own hash.
    +      *  List is not sorted and not shuffled.
           */
    -    public List getFloodfillPeers() {
    +    public List<Hash> getFloodfillPeers() {
             FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
             return sel.selectFloodfillParticipants(getKBuckets());
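
In getKnownRouterData() above, the data store holds a mix of entry types, so each Object is tested with instanceof before the now-mandatory cast into the List<RouterInfo>. The same filter idiom in isolation, with standard library types standing in for the I2P classes:

    import java.util.ArrayList;
    import java.util.List;

    public class InstanceOfFilter {
        /** Collect only the Integer entries from a heterogeneous list,
            checking the runtime type before casting into the typed result. */
        static List<Integer> onlyIntegers(List<Object> mixed) {
            List<Integer> rv = new ArrayList<Integer>();
            for (Object o : mixed) {
                if (o instanceof Integer)
                    rv.add((Integer) o);  // cast is required because rv is List<Integer>
            }
            return rv;
        }

        public static void main(String[] args) {
            List<Object> mixed = new ArrayList<Object>();
            mixed.add(1);
            mixed.add("not a number");
            mixed.add(2);
            System.out.println(onlyIntegers(mixed)); // [1, 2]
        }
    }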
  • router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java

    r37a2ccc → rb045fb3a

     import net.i2p.util.Log;
     
    +/**
    + *  This is where we implement semi-Kademlia with the floodfills, by
    + *  selecting floodfills closest to a given key for
    + *  searches and stores.
    + *
    + */
     class FloodfillPeerSelector extends PeerSelector {
         public FloodfillPeerSelector(RouterContext ctx) { super(ctx); }
    …
          * Pick out peers with the floodfill capacity set, returning them first, but then
          * after they're complete, sort via kademlia.
    +     * Puts the floodfill peers that are directly connected first in the list.
          *
          * @return List of Hash for the peers selected
    …
         }
     
    +    /**
    +     * Pick out peers with the floodfill capacity set, returning them first, but then
    +     * after they're complete, sort via kademlia.
    +     * Does not prefer the floodfill peers that are directly connected.
    +     *
    +     * @return List of Hash for the peers selected
    +     */
         @Override
         public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
    …
         /**
          *  @return all floodfills not shitlisted forever. list will not include our own hash
    -     *
    +     *  List is not sorted and not shuffled.
          */
         public List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
    …
         /**
          *  @return all floodfills not shitlisted foreverx
    +     *  @param key the routing key
          *  @param maxNumRouters max to return
          *  Sorted by closest to the key if > maxNumRouters, otherwise not
    …
                 _wanted = wanted;
             }
    +
    +        /**
    +         *  @return unsorted list of all with the 'f' mark in their netdb
    +         *          except for shitlisted ones.
    +         */
             public List<Hash> getFloodfillParticipants() { return _floodfillMatches; }
    +
             private static final int EXTRA_MATCHES = 100;
             public void add(Hash entry) {
    …
             }
     
    +        /**
    +         *  @return list of all with the 'f' mark in their netdb except for shitlisted ones.
    +         *  The list is in 3 groups - unsorted (shuffled) within each group.
    +         *  Group 1: If preferConnected = true, the peers we are directly
    +         *           connected to, that meet the group 2 criteria
    +         *  Group 2: Netdb published less than 3h ago, no bad send in last 30m.
    +         *  Group 3: All others
    +         */
             public List<Hash> get(int howMany, boolean preferConnected) {
                 Collections.shuffle(_floodfillMatches, _context.random());
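
The new javadoc on get() describes a three-group ordering, shuffled within each group, so equally-ranked floodfills are load-balanced while better candidates still come first. A compact sketch of that shape; the two predicates are hypothetical stand-ins for the connected and freshness checks the comment describes:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    public class GroupedSelection {
        /** Return up to howMany peers: group 1 first, then group 2, then group 3,
            shuffled within each group, in the spirit of the javadoc above. */
        static List<String> get(List<String> candidates, int howMany, Random rnd) {
            List<String> group1 = new ArrayList<String>(); // e.g. directly connected
            List<String> group2 = new ArrayList<String>(); // e.g. recently published, no bad sends
            List<String> group3 = new ArrayList<String>(); // all others
            for (String peer : candidates) {
                if (isConnected(peer)) group1.add(peer);
                else if (isFresh(peer)) group2.add(peer);
                else group3.add(peer);
            }
            Collections.shuffle(group1, rnd);
            Collections.shuffle(group2, rnd);
            Collections.shuffle(group3, rnd);
            List<String> rv = new ArrayList<String>();
            rv.addAll(group1);
            rv.addAll(group2);
            rv.addAll(group3);
            return rv.size() > howMany ? rv.subList(0, howMany) : rv;
        }

        // Hypothetical stand-ins for the real connection-state and netdb checks.
        static boolean isConnected(String peer) { return peer.startsWith("conn"); }
        static boolean isFresh(String peer) { return peer.startsWith("fresh"); }

        public static void main(String[] args) {
            List<String> peers = new ArrayList<String>();
            Collections.addAll(peers, "connA", "freshB", "otherC", "connD");
            System.out.println(get(peers, 3, new Random()));
        }
    }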