Analyzer analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
{
Tokenizer source = new FooTokenizer(reader);
TokenStream filter = new FooFilter(source);
filter = new BarFilter(filter);
return new TokenStreamComponents(source, filter);
});
For more examples, see the
var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
{
Tokenizer source = new FooTokenizer(reader);
TokenStream filter = new FooFilter(source);
filter = new BarFilter(filter);
return new TokenStreamComponents(source, filter);
});
var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
{
Tokenizer source = new FooTokenizer(reader);
TokenStream filter = new FooFilter(source);
filter = new BarFilter(filter);
return new TokenStreamComponents(source, filter);
}, reuseStrategy);
var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
{
Tokenizer source = new FooTokenizer(reader);
TokenStream filter = new FooFilter(source);
filter = new BarFilter(filter);
return new TokenStreamComponents(source, filter);
}, initReader: (fieldName, reader) =>
{
return new HTMLStripCharFilter(reader);
});
var analyzer = Analyzer.NewAnonymous(createComponents: (fieldName, reader) =>
{
Tokenizer source = new FooTokenizer(reader);
TokenStream filter = new FooFilter(source);
filter = new BarFilter(filter);
return new TokenStreamComponents(source, filter);
}, initReader: (fieldName, reader) =>
{
return new HTMLStripCharFilter(reader);
}, reuseStrategy);
IndexableFieldType fieldType = new IndexableFieldType(TextField.TYPE_NOT_STORED)
{
OmitNorms = true,
IndexOptions = IndexOptions.DOCS_ONLY
};
Field field = new Field(name, new NumericTokenStream(precisionStep).SetInt32Value(value), fieldType);
document.Add(field);
NumericTokenStream stream = new NumericTokenStream(precisionStep);
IndexableFieldType fieldType = new IndexableFieldType(TextField.TYPE_NOT_STORED)
{
OmitNorms = true,
IndexOptions = IndexOptions.DOCS_ONLY
};
Field field = new Field(name, stream, fieldType);
Document document = new Document();
document.Add(field);
for(all documents)
{
stream.SetInt32Value(value)
writer.AddDocument(document);
}
new Field(name, new NumericTokenStream(precisionStep).SetInt64Value(value))
new Field(name, new NumericTokenStream(precisionStep).SetInt32Value(value))
new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))
new Field(name, new NumericTokenStream(precisionStep).SetSingleValue(value))
return reusableToken.Reinit(string, startOffset, endOffset[, type]);
return reusableToken.Reinit(string, 0, string.Length, startOffset, endOffset[, type]);
return reusableToken.Reinit(buffer, 0, buffer.Length, startOffset, endOffset[, type]);
return reusableToken.Reinit(buffer, start, end - start, startOffset, endOffset[, type]);
return reusableToken.Reinit(source.Buffer, 0, source.Length, source.StartOffset, source.EndOffset[, source.Type]);
EndOffset -
obj.SetLength(30).Append("hey you");
EndOffset -
// GetAttribute is a method: it must be invoked with () — the original example
// assigned a method group. Also, the while condition was missing its closing paren.
TermToBytesRefAttribute termAtt = tokenStream.GetAttribute<TermToBytesRefAttribute>();
BytesRef bytes = termAtt.BytesRef;
while (tokenStream.IncrementToken())
{
    // you must call termAtt.FillBytesRef() before doing something with the bytes.
    // this encodes the term value (internally it might be a char[], etc) into the bytes.
    int hashCode = termAtt.FillBytesRef();
    if (IsInteresting(bytes))
    {
        // because the bytes are reused by the attribute (like ICharTermAttribute's char[] buffer),
        // you should make a copy if you need persistent access to the bytes, otherwise they will
        // be rewritten across calls to IncrementToken()
        DoSomethingWith(new BytesRef(bytes));
    }
}
...
@lucene.experimental this is a very expert API, please use
// Example: a codec that delegates everything to Lucene46Codec
// except the live docs format, which it overrides with a custom one.
public sealed class CustomCodec : FilterCodec
{
    public CustomCodec()
        // The string name "CustomCodec" is how this codec is looked up by SPI.
        : base("CustomCodec", new Lucene46Codec())
    {
    }

    public override LiveDocsFormat LiveDocsFormat
    {
        get { return new CustomLiveDocsFormat(); }
    }
}
PackedInts.COMPACT
Example for skipInterval = 3:
c (skip level 2)
c c c (skip level 1)
x x x x x x x x x x (skip level 0)
d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
3 6 9 12 15 18 21 24 27 30 (df)
d - document
x - skip data
c - skip data with child pointer
Skip level i contains every skipInterval-th entry from skip level i-1.
Therefore the number of entries on level i is: floor(df / (skipInterval ^ (i + 1))).
Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
this guarantees a logarithmic amount of skips to find the target document.
While this class takes care of writing the different skip levels,
subclasses must define the actual format of the skip data.
// Register the factory at application start up.
Codec.SetCodecFactory(new DefaultCodecFactory {
CustomCodecTypes = new Type[] { typeof(MyCodec), typeof(AnotherCodec) }
});
// Example: a factory that registers only an explicit set of codecs, in a fixed
// order, and hand-constructs codecs that require constructor dependencies.
public class ExplicitCodecFactory : DefaultCodecFactory
{
    protected override void Initialize()
    {
        // Load specific codecs in a specific order.
        PutCodecType(typeof(MyCodec));
        PutCodecType(typeof(AnotherCodec));
    }

    protected override Codec NewCodec(Type type)
    {
        // Special case: AnotherCodec has a required dependency
        if (typeof(AnotherCodec).Equals(type))
            return new AnotherCodec(new SomeDependency());
        // Everything else is created by the default (reflection-based) path.
        return base.NewCodec(type);
    }
}
// Register the factory at application start up.
Codec.SetCodecFactory(new ExplicitCodecFactory());
See the
// Example: a factory that keeps all default codecs and additionally
// scans an assembly for custom codec implementations.
public class ScanningCodecFactory : DefaultCodecFactory
{
    protected override void Initialize()
    {
        // Load all default codecs
        base.Initialize();
        // Load all of the codecs inside of the same assembly that MyCodec is defined in
        ScanForCodecs(typeof(MyCodec).Assembly);
    }
}
// Register the factory at application start up.
Codec.SetCodecFactory(new ScanningCodecFactory());
Codecs in the target assembly (or assemblies) can be excluded from the scan by decorating them with
the
// Register the factory at application start up.
DocValuesFormat.SetDocValuesFormatFactory(new DefaultDocValuesFormatFactory {
CustomDocValuesFormatTypes = new Type[] { typeof(MyDocValuesFormat), typeof(AnotherDocValuesFormat) }
});
// Example: a factory that registers only an explicit set of doc values formats,
// in a fixed order, and hand-constructs formats that require dependencies.
public class ExplicitDocValuesFormatFactory : DefaultDocValuesFormatFactory
{
    protected override void Initialize()
    {
        // Load specific doc values formats in a specific order.
        PutDocValuesFormatType(typeof(MyDocValuesFormat));
        PutDocValuesFormatType(typeof(AnotherDocValuesFormat));
    }

    protected override DocValuesFormat NewDocValuesFormat(Type type)
    {
        // Special case: AnotherDocValuesFormat has a required dependency
        if (typeof(AnotherDocValuesFormat).Equals(type))
            return new AnotherDocValuesFormat(new SomeDependency());
        // Everything else is created by the default (reflection-based) path.
        return base.NewDocValuesFormat(type);
    }
}
// Register the factory at application start up.
DocValuesFormat.SetDocValuesFormatFactory(new ExplicitDocValuesFormatFactory());
See the
// Example: a factory that keeps all default doc values formats and additionally
// scans an assembly for custom implementations.
public class ScanningDocValuesFormatFactory : DefaultDocValuesFormatFactory
{
    protected override void Initialize()
    {
        // Load all default doc values formats
        base.Initialize();
        // Load all of the doc values formats inside of the same assembly that MyDocValuesFormat is defined in
        ScanForDocValuesFormats(typeof(MyDocValuesFormat).Assembly);
    }
}
// Register the factory at application start up.
DocValuesFormat.SetDocValuesFormatFactory(new ScanningDocValuesFormatFactory());
Doc values formats in the target assembly can be excluded from the scan by decorating them with
the
// Register the factory at application start up.
PostingsFormat.SetPostingsFormatFactory(new DefaultPostingsFormatFactory {
CustomPostingsFormatTypes = new Type[] { typeof(MyPostingsFormat), typeof(AnotherPostingsFormat) }
});
// Example: a factory that registers only an explicit set of postings formats,
// in a fixed order, and hand-constructs formats that require dependencies.
public class ExplicitPostingsFormatFactory : DefaultPostingsFormatFactory
{
    protected override void Initialize()
    {
        // Load specific postings formats in a specific order.
        PutPostingsFormatType(typeof(MyPostingsFormat));
        PutPostingsFormatType(typeof(AnotherPostingsFormat));
    }

    protected override PostingsFormat NewPostingsFormat(Type type)
    {
        // Special case: AnotherPostingsFormat has a required dependency
        if (typeof(AnotherPostingsFormat).Equals(type))
            return new AnotherPostingsFormat(new SomeDependency());
        // Everything else is created by the default (reflection-based) path.
        return base.NewPostingsFormat(type);
    }
}
// Register the factory at application start up.
PostingsFormat.SetPostingsFormatFactory(new ExplicitPostingsFormatFactory());
See the
// Example: a factory that keeps all default postings formats and additionally
// scans an assembly for custom implementations.
public class ScanningPostingsFormatFactory : DefaultPostingsFormatFactory
{
    protected override void Initialize()
    {
        // Load all default postings formats
        base.Initialize();
        // Load all of the postings formats inside of the same assembly that MyPostingsFormat is defined in
        ScanForPostingsFormats(typeof(MyPostingsFormat).Assembly);
    }
}
// Register the factory at application start up.
PostingsFormat.SetPostingsFormatFactory(new ScanningPostingsFormatFactory());
Postings formats in the target assembly can be excluded from the scan by decorating them with
the
document.Add(new BinaryDocValuesField(name, new BytesRef("hello")));
document.Add(new ByteDocValuesField(name, (byte) 22));
document.Add(new DerefBytesDocValuesField(name, new BytesRef("hello")));
document.Add(new DoubleField(name, 6.0, Field.Store.NO));
For optimal performance, re-use the
DoubleField field = new DoubleField(name, 0.0, Field.Store.NO);
Document document = new Document();
document.Add(field);
for (all documents)
{
...
field.SetDoubleValue(value)
writer.AddDocument(document);
...
}
See also
document.Add(new SingleField(name, 6.0F, Field.Store.NO));
For optimal performance, re-use the
// The declared type was 'FloatField', which does not match the constructed type:
// Lucene.NET renamed FloatField to SingleField (see the construction on the same
// line and SetSingleValue below).
SingleField field = new SingleField(name, 0.0F, Field.Store.NO);
Document document = new Document();
document.Add(field);
for (all documents)
{
    ...
    field.SetSingleValue(value)
    writer.AddDocument(document);
    ...
}
See also
document.Add(new Int32DocValuesField(name, 22));
If you also need to store the value, you should add a
separate
document.Add(new Int32Field(name, 6, Field.Store.NO));
For optimal performance, re-use the
Int32Field field = new Int32Field(name, 6, Field.Store.NO);
Document document = new Document();
document.Add(field);
for (all documents)
{
...
field.SetInt32Value(value)
writer.AddDocument(document);
...
}
See also
document.Add(new Int64DocValuesField(name, 22L));
document.Add(new Int64Field(name, 6L, Field.Store.NO));
For optimal performance, re-use the
Int64Field field = new Int64Field(name, 0L, Field.Store.NO);
Document document = new Document();
document.Add(field);
for (all documents) {
...
field.SetInt64Value(value)
writer.AddDocument(document);
...
}
See also
document.Add(new NumericDocValuesField(name, 22L));
If you also need to store the value, you should add a
separate
document.Add(new PackedInt64DocValuesField(name, 22L));
document.Add(new Int16DocValuesField(name, (short) 22));
document.Add(new SortedBytesDocValuesField(name, new BytesRef("hello")));
document.Add(new SortedDocValuesField(name, new BytesRef("hello")));
document.Add(new SortedSetDocValuesField(name, new BytesRef("hello")));
document.Add(new SortedSetDocValuesField(name, new BytesRef("world")));
document.Add(new StraightBytesDocValuesField(name, new BytesRef("hello")));
if (IsVerbose)
{
Message("your message");
}
true iff the given item is identical to the item
hold by the slices tail, otherwise false.
ext is not empty.
try
{
writer.Dispose();
}
finally
{
if (IndexWriter.IsLocked(directory))
{
IndexWriter.Unlock(directory);
}
}
after which, you must be certain not to use the writer
instance anymore.
// 'new' was missing before Lucene46Codec(): without it the initializer is a
// method call and does not compile.
IndexWriterConfig conf = new IndexWriterConfig(analyzer)
{
    Codec = new Lucene46Codec(),
    OpenMode = OpenMode.CREATE
};
However, if you prefer to match the syntax of Lucene using chained setter methods,
there are extension methods in the Lucene.Net.Index.Extensions namespace. Example usage:
using Lucene.Net.Index.Extensions;
..
IndexWriterConfig conf = new IndexWriterConfig(analyzer)
.SetCodec(new Lucene46Codec())
.SetOpenMode(OpenMode.CREATE);
@since 3.1
// Example: per-field postings customization by subclassing a codec.
public class MyLucene45Codec : Lucene45Codec
{
    //customize Lucene41PostingsFormat, passing minBlockSize=50, maxBlockSize=100
    private readonly PostingsFormat tweakedPostings = new Lucene41PostingsFormat(50, 100);

    public override PostingsFormat GetPostingsFormatForField(string field)
    {
        // Only the term-heavy field gets the tweaked postings format;
        // every other field falls back to the codec default.
        if (field.Equals("fieldWithTonsOfTerms", StringComparison.Ordinal))
            return tweakedPostings;
        else
            return base.GetPostingsFormatForField(field);
    }
}
...
iwc.Codec = new MyLucene45Codec();
Note that other implementations may have their own parameters, or no parameters at all.
false.
IndexWriterConfig iwc = new IndexWriterConfig(LuceneVersion.LUCENE_XX, new KeywordAnalyzer());
iwc.MergePolicy = new UpgradeIndexMergePolicy(iwc.MergePolicy);
using (IndexWriter w = new IndexWriter(dir, iwc))
{
w.ForceMerge(1);
}
if (Verbose) {
Message("your message");
}
var booleanQuery = new BooleanQuery() {
{ new WildcardQuery(new Term("field2", "foobar")), Occur.SHOULD },
{ new MultiPhraseQuery() {
new Term("field", "microsoft"),
new Term("field", "office")
}, Occur.SHOULD }
};
// or
var booleanQuery = new BooleanQuery() {
new BooleanClause(new WildcardQuery(new Term("field2", "foobar")), Occur.SHOULD),
new BooleanClause(new MultiPhraseQuery() {
new Term("field", "microsoft"),
new Term("field", "office")
}, Occur.SHOULD)
};
foreach (BooleanClause clause in booleanQuery) {}
// Example collector that records matching doc IDs into a caller-supplied bit set.
private class MySearchCollector : ICollector
{
    private readonly OpenBitSet bits;
    private int docBase;

    public MySearchCollector(OpenBitSet bits)
    {
        // nameof is refactor-safe (was the string literal "bits")
        if (bits is null) throw new ArgumentNullException(nameof(bits));
        this.bits = bits;
    }

    // ignore scorer
    public void SetScorer(Scorer scorer)
    {
    }

    // accept docs out of order (for a BitSet it doesn't matter)
    public bool AcceptDocsOutOfOrder
    {
        get { return true; }
    }

    public void Collect(int doc)
    {
        // doc is segment-relative; docBase makes it index-wide
        bits.Set(doc + docBase);
    }

    public void SetNextReader(AtomicReaderContext context)
    {
        this.docBase = context.DocBase;
    }
}
IndexSearcher searcher = new IndexSearcher(indexReader);
OpenBitSet bits = new OpenBitSet(indexReader.MaxDoc);
searcher.Search(query, new MySearchCollector(bits));
IndexSearcher searcher = new IndexSearcher(indexReader);
OpenBitSet bits = new OpenBitSet(indexReader.MaxDoc);
int docBase;
searcher.Search(query,
Collector.NewAnonymous(setScorer: (scorer) =>
{
// ignore scorer
}, collect: (doc) =>
{
bits.Set(doc + docBase);
}, setNextReader: (context) =>
{
docBase = context.DocBase;
}, acceptsDocsOutOfOrder: () =>
{
return true;
})
);
var disjunctionMaxQuery = new DisjunctionMaxQuery(0.1f) {
new TermQuery(new Term("field1", "albino")),
new TermQuery(new Term("field2", "elephant"))
};
var docIdSet = DocIdSet.NewAnonymous(getIterator: () =>
{
OpenBitSet bitset = new OpenBitSet(5);
bitset.Set(0, 5);
return new DocIdBitSet(bitset);
});
var docIdSet = DocIdSet.NewAnonymous(getIterator: () =>
{
OpenBitSet bitset = new OpenBitSet(5);
bitset.Set(0, 5);
return new DocIdBitSet(bitset);
}, bits: () =>
{
return bits;
});
var docIdSet = DocIdSet.NewAnonymous(getIterator: () =>
{
OpenBitSet bitset = new OpenBitSet(5);
bitset.Set(0, 5);
return new DocIdBitSet(bitset);
}, isCacheable: () =>
{
return true;
});
var docIdSet = DocIdSet.NewAnonymous(getIterator: () =>
{
OpenBitSet bitset = new OpenBitSet(5);
bitset.Set(0, 5);
return new DocIdBitSet(bitset);
}, bits: () =>
{
return bits;
}, isCacheable: () =>
{
return true;
});
int Advance(int target)
{
int doc;
while ((doc = NextDoc()) < target)
{
}
return doc;
}
Some implementations are considerably more efficient than that.
var filter = Filter.NewAnonymous(getDocIdSet: (context, acceptDocs) =>
{
if (acceptDocs is null) acceptDocs = new Bits.MatchAllBits(5);
OpenBitSet bitset = new OpenBitSet(5);
if (acceptDocs.Get(1)) bitset.Set(1);
if (acceptDocs.Get(3)) bitset.Set(3);
return new DocIdBitSet(bitset);
});
true. Otherwise this strategy falls back to a "zig-zag join" (
PriorityQueue<ScoreDoc> pq = new HitQueue(10, true); // pre-populate.
ScoreDoc top = pq.Top;

// Add/Update one element.
top.Score = 1.0f;
top.Doc = 0; // was 'top.Soc' — ScoreDoc has no 'Soc' member; 'Doc' is intended
top = (ScoreDoc) pq.UpdateTop();
int totalHits = 1;

// Now pop only the elements that were *truly* inserted.
// First, pop all the sentinel elements (there are pq.Count - totalHits).
for (int i = pq.Count - totalHits; i > 0; i--) pq.Pop();

// Now pop the truly added elements.
ScoreDoc[] results = new ScoreDoc[totalHits];
for (int i = totalHits - 1; i >= 0; i--)
{
    results[i] = (ScoreDoc)pq.Pop();
}
NOTE:
.IndexReader.Document(docID) .IndexReader.Document(docID, fieldVisitor) .IndexReader.Document(docID, fieldsToLoad) n
hits for null ones.
var multiPhraseQuery = new MultiPhraseQuery() {
new Term("field", "microsoft"),
new Term("field", "office")
};
Note that as long as you specify all of the parameters, you can use either
GetTermsEnum(terms, new AttributeSource())
var phraseQuery = new NGramPhraseQuery(2) {
new Term("field", "ABCD"),
new Term("field", "EFGH")
};
Note that as long as you specify all of the parameters, you can use either
Filter f = NumericRangeFilter.NewFloatRange("weight", 0.03f, 0.10f, true, true);
Accepts all documents whose float valued "weight" field
ranges from 0.03 to 0.10, inclusive.
See
Query q = NumericRangeQuery.NewFloatRange("weight", 0.03f, 0.10f, true, true);
matches all documents whose float valued "weight" field
ranges from 0.03 to 0.10, inclusive.
Schindler, U, Diepenbroek, M, 2008. Generic XML-based Framework for Metadata Portals. Computers &amp; Geosciences 34 (12), 1947-1955. doi:10.1016/j.cageo.2008.02.023
7*255*2 + 255 = 3825 distinct terms (when there is a term for every distinct value of an
8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
because it would always be possible to reduce the full 256 values to one term with degraded precision).
In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
and a uniform value distribution).
precisionStep is given.
var phraseQuery = new PhraseQuery() {
new Term("field", "microsoft"),
new Term("field", "office")
};
Note that as long as you specify all of the parameters, you can use either
public IndexSearcher NewSearcher(IndexReader r)
{
return new IndexSearcher(r);
}
You can pass your own factory instead if you want custom behavior, such as:
SearcherLifetimeManager mgr = new SearcherLifetimeManager();
Per search-request, if it's a "new" search request, then
obtain the latest searcher you have (for example, by
using
// Record the current searcher, and save the returned
// token into user's search results (eg as a hidden
// HTML form field):
long token = mgr.Record(searcher);
When a follow-up search arrives, for example the user
clicks next page, drills down/up, etc., take the token
that you saved from the previous search and:
// If possible, obtain the same searcher as the last
// search:
IndexSearcher searcher = mgr.Acquire(token);
if (searcher != null)
{
// Searcher is still here
try
{
// do searching...
}
finally
{
mgr.Release(searcher);
// Do not use searcher after this!
searcher = null;
}
}
else
{
// Searcher was pruned -- notify user session timed
// out, or, pull fresh searcher again
}
Finally, in a separate thread, ideally the same thread
that's periodically reopening your searchers, you should
periodically prune old searchers:
mgr.Prune(new PruneByAge(600.0));
IndexSearcher s = manager.Acquire();
try
{
// Do searching, doc retrieval, etc. with s
}
finally
{
manager.Release(s);
// Do not use s after this!
s = null;
}
Idf(docFreq, searcher.MaxDoc);
Note that state.Length is small.
Idf(docFreq, searcher.MaxDoc);
Note that the field is added as follows:
document.Add(new Field("byNumber", x.ToString(CultureInfo.InvariantCulture), Field.Store.NO, Field.Index.NOT_ANALYZED));
4 * IndexReader.MaxDoc * (# of different fields actually used to sort)
teacherid: 1
studentfirstname: james
studentsurname: jones
teacherid: 2
studentfirstname: james
studentsurname: smith
studentfirstname: sally
studentsurname: jones
SpanQuery q1 = new SpanTermQuery(new Term("studentfirstname", "james"));
SpanQuery q2 = new SpanTermQuery(new Term("studentsurname", "jones"));
SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
Query q = new SpanNearQuery(new SpanQuery[] { q1, q2m }, -1, false);
to search for 'studentfirstname:james studentsurname:jones' and find
teacherid 1 without matching teacherid 2 (which has a 'james' in position 0
and 'jones' in position 1).
bool SkipTo(int target)
{
do
{
if (!Next())
return false;
} while (target > Doc);
return true;
}
Most implementations are considerably more efficient than that.
WildcardQuery wildcard = new WildcardQuery(new Term("field", "bro?n"));
SpanQuery spanWildcard = new SpanMultiTermQueryWrapper<WildcardQuery>(wildcard);
// do something with spanWildcard, such as use it in a SpanFirstQuery
o is equal to this.
bool SkipTo(int target)
{
do
{
if (!Next())
return false;
} while (target > Doc);
return true;
}
Most implementations are considerably more efficient than that.
// Counter is in the Lucene.Net.Util namespace
Counter clock = Counter.NewCounter(true);
long baseline = clock.Get();
// ... prepare search
TimeLimitingCollector collector = new TimeLimitingCollector(c, clock, numTicks);
collector.SetBaseline(baseline);
indexSearcher.Search(query, collector);
var searcherManager = new SearcherManager(indexWriter, true, null);
using (var context = searcherManager.GetContext())
{
IndexSearcher searcher = context.Reference;
// use searcher...
}
protected override void Dispose(bool disposing)
{
    // Atomically flip the open flag so a second Dispose call returns immediately.
    if (!CompareAndSetIsOpen(expect: true, update: false)) return;

    // Dispose unmanaged resources

    if (disposing)
    {
        // Dispose managed resources
    }
}
Directory to; // the directory to copy to
foreach (string file in dir.ListAll()) {
dir.Copy(to, file, newFile, IOContext.DEFAULT); // newFile can be either file, or a new name
}
var result = Lock.With.NewAnonymous<string>(
@lock: directory.MakeLock("my.lock"),
lockWaitTimeout: Lock.LOCK_OBTAIN_WAIT_FOREVER,
doBody: () =>
{
//... code to execute while locked ...
return "the result";
}).Run();
var result = Lock.With.NewAnonymous<string>(
@lock: directory.MakeLock("my.lock"),
lockWaitTimeout: Lock.LOCK_OBTAIN_WAIT_FOREVER,
doBody: () =>
{
//... code to execute while locked ...
return "the result";
}).Run();
using Directory fsDir = FSDirectory.Open(new DirectoryInfo("/path/to/index"));
using NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_48, analyzer);
using IndexWriter writer = new IndexWriter(cachedFSDir, conf);
default as the second parameter.
[WritableArray, SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
public void ReflectWith(IAttributeReflector reflector)
{
reflector.Reflect(typeof(ICharTermAttribute), "term", GetTerm());
reflector.Reflect(typeof(IPositionIncrementAttribute), "positionIncrement", GetPositionIncrement());
}
public String ToString()
{
return "start=" + startOffset + ",end=" + endOffset;
}
This method may be overridden by subclasses.
I from the prefix.
def bits2int(val):
arr=0
for shift in range(8,0,-1):
if val & 0x80:
arr = (arr << 4) | shift
val = val << 1
return arr
def int_table():
tbl = [ hex(bits2int(val)).strip('L') for val in range(256) ]
return ','.join(tbl)
(i >>> (4 * n)) & 0x0F is the offset of the n-th set bit of
the given byte plus one, or 0 if there are n or less bits set in the given
byte. For example bitList(12) returns 0x43:
0x43 &amp; 0x0F is 3, meaning the first bit set is at offset 3-1 = 2,
(0x43 >>> 4) &amp; 0x0F is 4, meaning there is a second bit set at offset 4-1=3,
(0x43 >>> 8) &amp; 0x0F is 0, meaning there is no more bit set in this byte.
var iter = BytesRefEnumerator.EMPTY;
var iter = BytesRefIterator.EMPTY;
using (TextWriter sw = new StreamWriter("out.dot"))
{
Util.ToDot(fst, sw, true, true);
}
and then, from command line:
dot -Tpng -o out.png out.dot
IDisposable resource1 = null, resource2 = null, resource3 = null;
ExpectedException priorE = null;
try
{
resource1 = ...; resource2 = ...; resource3 = ...; // Acquisition may throw ExpectedException
..do..stuff.. // May throw ExpectedException
}
catch (ExpectedException e)
{
priorE = e;
}
finally
{
IOUtils.CloseWhileHandlingException(priorE, resource1, resource2, resource3);
}
IDisposable resource1 = null, resource2 = null, resource3 = null;
ExpectedException priorE = null;
try
{
resource1 = ...; resource2 = ...; resource3 = ...; // Acquisition may throw ExpectedException
..do..stuff.. // May throw ExpectedException
}
catch (ExpectedException e)
{
priorE = e;
}
finally
{
IOUtils.DisposeWhileHandlingException(priorE, resource1, resource2, resource3);
}
// Implements ISentinelFactory<T>.Create(PriorityQueue<T>)
var sentinelFactory = new MySentinelFactory<MyObject>();
PriorityQueue<MyObject> pq = new MyQueue<MyObject>(sentinelFactory);
// save the 'top' element, which is guaranteed to not be default.
MyObject pqTop = pq.Top;
<...>
// now in order to add a new element, which is 'better' than top (after
// you've verified it is better), it is as simple as:
pqTop.Change();
pqTop = pq.UpdateTop();
NOTE:
pq.Top.Change();
pq.UpdateTop();
instead of
o = pq.Pop();
o.Change();
pq.Push(o);
pq.Top.Change();
pq.UpdateTop();
instead of
o = pq.Pop();
o.Change();
pq.Push(o);
QueryBuilder builder = new QueryBuilder(analyzer);
Query a = builder.CreateBooleanQuery("body", "just a test");
Query b = builder.CreatePhraseQuery("body", "another test");
Query c = builder.CreateMinShouldMatchQuery("body", "another test", 0.5f);
SentinelIntSet set = ...
foreach (int v in set.keys)
{
if (v == set.EmptyVal)
continue;
//use v...
}
internal static readonly VirtualMethod newMethod =
new VirtualMethod(typeof(BaseClass), "newName", parameters...);
internal static readonly VirtualMethod oldMethod =
new VirtualMethod(typeof(BaseClass), "oldName", parameters...);
bool isDeprecatedMethodOverridden =
oldMethod.GetImplementationDistance(this.GetType()) > newMethod.GetImplementationDistance(this.GetType());
// alternatively (more readable):
bool isDeprecatedMethodOverridden =
VirtualMethod.CompareImplementationDistance(this.GetType(), oldMethod, newMethod) > 0
catch (Exception ex) when (ex.IsAssertionError())
catch (Exception ex) when (ex.IsError())
catch (Exception ex) when (ex.IsNoClassDefFoundError())
catch (Exception ex) when (ex.IsOutOfMemoryError())
catch (Exception ex) when (ex.IsServiceConfigurationError())
catch (Exception ex) when (ex.IsStackOverflowError())
catch (Lucene.Net.QueryParsers.Surround.Parser.ParseException e)
catch (Exception ex) when (ex.IsClassNotFoundException())
catch (Exception ex) when (ex.IsAlreadyClosedException())
Lucene made a custom
catch (Exception ex) when (ex.IsEOFException())
catch (Exception ex) when (ex.IsNoSuchMethodException())
catch (Exception ex) when (ex.IsArrayIndexOutOfBoundsException())
catch (Exception ex) when (ex.IsIllegalArgumentException())
catch (Exception ex) when (ex.IsIllegalStateException())
catch (Exception ex) when (ex.IsIndexOutOfBoundsException())
catch (Exception ex) when (ex.IsNullPointerException())
The static
Integer someInt = new Integer(43);
int primitiveInt = someInt; // Implicit cast by the Java compiler
If
int? someInt = 43;
int primitiveInt = someInt; // Compile error
So, to get the same behavior as in Java (provided the nullable cannot be factored away), the
appropriate translation would be:
int? someInt = 43;
int primitiveInt;
if (someInt.HasValue)
primitiveInt = someInt.Value;
else
throw new NullReferenceException();
However, do note in most cases it would be better to try to refactor so the nullable
(and therefore the exception) isn't required.
catch (Exception ex) when (ex.IsNumberFormatException())
catch (Exception ex) when (ex.IsRuntimeException())
catch (Exception ex) when (ex.IsStringIndexOutOfBoundsException())
catch (Exception ex) when (ex.IsUnsupportedOperationException())