package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.io.Serializable; import java.util.Collection; import java.util.Iterator; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.util.SmallFloat; /** Expert: Scoring API. *

Subclasses implement search scoring. * *

The score of query q for document d correlates to the
 * cosine-distance or dot-product between document and query vectors in a
 * Vector Space Model (VSM) of Information Retrieval.
 * A document whose vector is closer to the query vector in that model is scored higher.
 * The score is computed as follows:
 *
 * <pre>
 *   score(q,d) = coord(q,d) · queryNorm(q)
 *                · Σ (t in q) ( tf(t in d) · idf(t)² · t.getBoost() · norm(t,d) )
 * </pre>

where
 * <ol>
 *   <li>
 *     <b>tf(t in d)</b> correlates to the term's <i>frequency</i>,
 *     defined as the number of times term t appears in the currently scored
 *     document d. Documents that have more occurrences of a given term receive
 *     a higher score. The default computation for tf(t in d) in
 *     {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
 *     <pre>
 *       tf(t in d) = frequency½
 *     </pre>
 *   </li>
 *
 *   <li>
 *     <b>idf(t)</b> stands for Inverse Document Frequency. This value
 *     correlates to the inverse of docFreq (the number of documents in which
 *     the term t appears). This means rarer terms give a higher contribution
 *     to the total score. The default computation for idf(t) in
 *     {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is:
 *     <pre>
 *       idf(t) = 1 + log( numDocs / (docFreq + 1) )
 *     </pre>
 *   </li>
 *
 *   <li>
 *     <b>coord(q,d)</b> is a score factor based on how many of the query terms
 *     are found in the specified document. Typically, a document that contains
 *     more of the query's terms will receive a higher score than another
 *     document with fewer query terms. This is a search time factor computed
 *     in {@link #coord(int, int) coord(q,d)} by the Similarity in effect at
 *     search time.
 *   </li>
 *
 *   <li>
 *     <b>queryNorm(q)</b> is a normalizing factor used to make scores between
 *     queries comparable. This factor does not affect document ranking (since
 *     all ranked documents are multiplied by the same factor), but rather just
 *     attempts to make scores from different queries (or even different
 *     indexes) comparable. This is a search time factor computed by the
 *     Similarity in effect at search time. The default computation in
 *     {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) DefaultSimilarity} is:
 *     <pre>
 *       queryNorm(q) = queryNorm(sumOfSquaredWeights) = 1 / sumOfSquaredWeights½
 *     </pre>
 *     The sum of squared weights (of the query terms) is computed by the query
 *     {@link org.apache.lucene.search.Weight} object via
 *     {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights}.
 *     For example, a {@link org.apache.lucene.search.BooleanQuery boolean query}
 *     computes this value as:
 *     <pre>
 *       sumOfSquaredWeights = q.getBoost()² · Σ (t in q) ( idf(t) · t.getBoost() )²
 *     </pre>
 *     where q.getBoost() is {@link org.apache.lucene.search.Query#getBoost()}.
 *   </li>
 *
 *   <li>
 *     <b>t.getBoost()</b> is a search time boost of term t in the query q, as
 *     specified in the query text (see query syntax), or as set by application
 *     calls to {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}.
 *     Notice that there is really no direct API for accessing the boost of one
 *     term in a multi-term query; rather, multiple terms are represented in a
 *     query as multiple {@link org.apache.lucene.search.TermQuery TermQuery}
 *     objects, and so the boost of a term in the query is accessible by calling
 *     the sub-query's {@link org.apache.lucene.search.Query#getBoost() getBoost()}.
 *   </li>
 *
 *   <li>
 *     <b>norm(t,d)</b> encapsulates a few (indexing time) boost and length
 *     factors: the document boost
 *     ({@link org.apache.lucene.document.Document#getBoost() doc.getBoost()}),
 *     the boost of each field f
 *     ({@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost()}),
 *     and the field length norm ({@link #lengthNorm(String, int) lengthNorm(field)}).
 *     When a document is added to the index, all these factors are multiplied.
 *     If the document has multiple fields with the same name, all their boosts
 *     are multiplied together:
 *     <pre>
 *       norm(t,d) = doc.getBoost() · lengthNorm(field)
 *                   · Π (field f in d named as t) f.getBoost()
 *     </pre>
 *     However, the resulting norm value is {@link #encodeNorm(float) encoded} as
 *     a single byte before being stored. At search time, the norm byte value is
 *     read from the index {@link org.apache.lucene.store.Directory directory}
 *     and {@link #decodeNorm(byte) decoded} back to a float norm value. This
 *     encoding/decoding, while reducing index size, comes with the price of
 *     precision loss - it is not guaranteed that decode(encode(x)) = x. For
 *     instance, decode(encode(0.89)) = 0.75. Also notice that search time is
 *     too late to modify this norm part of scoring, e.g. by using a different
 *     {@link Similarity} for search.
 *   </li>
 * </ol>
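 *
 * <p>As an illustration of how these factors plug together (a sketch only;
 * the class name and choice of overrides below are hypothetical), a custom
 * Similarity is typically written by subclassing {@link DefaultSimilarity}
 * and overriding selected factors:
 *
 * <pre>
 *   // Sketch: dampen tf and ignore coord; all other factors keep their defaults.
 *   public class FlatterSimilarity extends DefaultSimilarity {
 *     public float tf(float freq) {
 *       // log-damped term frequency instead of the default frequency½
 *       return freq > 0 ? 1.0f + (float)Math.log(freq) : 0.0f;
 *     }
 *     public float coord(int overlap, int maxOverlap) {
 *       return 1.0f; // do not reward documents for matching more query terms
 *     }
 *   }
 *
 *   // Install it for both indexing and searching, e.g.:
 *   Similarity.setDefault(new FlatterSimilarity());
 * </pre>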
* * @see #setDefault(Similarity) * @see IndexWriter#setSimilarity(Similarity) * @see Searcher#setSimilarity(Similarity) */ public abstract class Similarity implements Serializable { /** The Similarity implementation used by default. */ private static Similarity defaultImpl = new DefaultSimilarity(); /** Set the default Similarity implementation used by indexing and search * code. * * @see Searcher#setSimilarity(Similarity) * @see IndexWriter#setSimilarity(Similarity) */ public static void setDefault(Similarity similarity) { Similarity.defaultImpl = similarity; } /** Return the default Similarity implementation used by indexing and search * code. * *

This is initially an instance of {@link DefaultSimilarity}. * * @see Searcher#setSimilarity(Similarity) * @see IndexWriter#setSimilarity(Similarity) */ public static Similarity getDefault() { return Similarity.defaultImpl; } /** Cache of decoded bytes. */ private static final float[] NORM_TABLE = new float[256]; static { for (int i = 0; i < 256; i++) NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i); } /** Decodes a normalization factor stored in an index. * @see #encodeNorm(float) */ public static float decodeNorm(byte b) { return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127 } /** Returns a table for decoding normalization bytes. * @see #encodeNorm(float) */ public static float[] getNormDecoder() { return NORM_TABLE; } /** Computes the normalization value for a field given the total number of * terms contained in a field. These values, together with field boosts, are * stored in an index and multiplied into scores for hits on each field by the * search code. * *

Matches in longer fields are less precise, so implementations of this * method usually return smaller values when numTokens is large, * and larger values when numTokens is small. * *
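 *
 * <p>For example, an implementation in the style of {@link DefaultSimilarity}
 * (a sketch, not a normative definition) is:
 *
 * <pre>
 *   public float lengthNorm(String fieldName, int numTokens) {
 *     // shorter fields yield larger norms, longer fields smaller ones
 *     return (float)(1.0 / Math.sqrt(numTokens));
 *   }
 * </pre>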

Note that these values are computed under * {@link IndexWriter#addDocument(org.apache.lucene.document.Document)} and then stored using * {@link #encodeNorm(float)}. Thus they have limited precision, and documents * must be re-indexed if this method is altered. * * @param fieldName the name of the field * @param numTokens the total number of tokens contained in fields named * fieldName of doc. * @return a normalization factor for hits on this field of this document * * @see org.apache.lucene.document.Field#setBoost(float) */ public abstract float lengthNorm(String fieldName, int numTokens); /** Computes the normalization value for a query given the sum of the squared * weights of each of the query terms. This value is then multiplied into the * weight of each query term. * *
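 *
 * <p>A sketch in the style of {@link DefaultSimilarity}, matching the
 * queryNorm(q) formula in the class description:
 *
 * <pre>
 *   public float queryNorm(float sumOfSquaredWeights) {
 *     return (float)(1.0 / Math.sqrt(sumOfSquaredWeights));
 *   }
 * </pre>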

This does not affect ranking, but rather just attempts to make scores * from different queries comparable. * * @param sumOfSquaredWeights the sum of the squares of query term weights * @return a normalization factor for query weights */ public abstract float queryNorm(float sumOfSquaredWeights); /** Encodes a normalization factor for storage in an index. * *
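 *
 * <p>Because the factor is packed into a single byte, the encoding is lossy;
 * for instance (a sketch using this class's static helpers, with the value
 * quoted from the class description):
 *
 * <pre>
 *   byte b = Similarity.encodeNorm(0.89f);
 *   float restored = Similarity.decodeNorm(b); // 0.75f, not 0.89f
 * </pre>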

The encoding uses a three-bit mantissa, a five-bit exponent, and * the zero-exponent point at 15, thus * representing values from around 7x10^9 to 2x10^-9 with about one * significant decimal digit of accuracy. Zero is also represented. * Negative numbers are rounded up to zero. Values too large to represent * are rounded down to the largest representable value. Positive values too * small to represent are rounded up to the smallest positive representable * value. * * @see org.apache.lucene.document.Field#setBoost(float) * @see SmallFloat */ public static byte encodeNorm(float f) { return SmallFloat.floatToByte315(f); } /** Computes a score factor based on a term or phrase's frequency in a * document. This value is multiplied by the {@link #idf(Term, Searcher)} * factor for each term in the query and these products are then summed to * form the initial score for a document. * *

Terms and phrases repeated in a document indicate the topic of the * document, so implementations of this method usually return larger values * when freq is large, and smaller values when freq * is small. * *
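 *
 * <p>For example, {@link #tf(float)} may be implemented in the style of
 * {@link DefaultSimilarity} (a sketch), matching the frequency½ formula in
 * the class description:
 *
 * <pre>
 *   public float tf(float freq) {
 *     return (float)Math.sqrt(freq); // sublinear growth with frequency
 *   }
 * </pre>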

The default implementation calls {@link #tf(float)}. * * @param freq the frequency of a term within a document * @return a score factor based on a term's within-document frequency */ public float tf(int freq) { return tf((float)freq); } /** Computes the amount of a sloppy phrase match, based on an edit distance. * This value is summed for each sloppy phrase match in a document to form * the frequency that is passed to {@link #tf(float)}. * *
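 *
 * <p>One possible shape, in the style of {@link DefaultSimilarity}
 * (illustrative, not normative):
 *
 * <pre>
 *   public float sloppyFreq(int distance) {
 *     return 1.0f / (distance + 1); // exact matches (distance 0) count most
 *   }
 * </pre>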

A phrase match with a small edit distance to a document passage more * closely matches the document, so implementations of this method usually * return larger values when the edit distance is small and smaller values * when it is large. * * @see PhraseQuery#setSlop(int) * @param distance the edit distance of this sloppy phrase match * @return the frequency increment for this match */ public abstract float sloppyFreq(int distance); /** Computes a score factor based on a term or phrase's frequency in a * document. This value is multiplied by the {@link #idf(Term, Searcher)} * factor for each term in the query and these products are then summed to * form the initial score for a document. * *

Terms and phrases repeated in a document indicate the topic of the * document, so implementations of this method usually return larger values * when freq is large, and smaller values when freq * is small. * * @param freq the frequency of a term within a document * @return a score factor based on a term's within-document frequency */ public abstract float tf(float freq); /** Computes a score factor for a simple term. * *

The default implementation is:
 * <pre>
 *   return idf(searcher.docFreq(term), searcher.maxDoc());
 * </pre>
* * Note that {@link Searcher#maxDoc()} is used instead of * {@link IndexReader#numDocs()} because it is proportional to * {@link Searcher#docFreq(Term)}, i.e., when one is inaccurate, * so is the other, and in the same direction. * * @param term the term in question * @param searcher the document collection being searched * @return a score factor for the term */ public float idf(Term term, Searcher searcher) throws IOException { return idf(searcher.docFreq(term), searcher.maxDoc()); } /** Computes a score factor for a phrase. * *

The default implementation sums the {@link #idf(Term,Searcher)} factor * for each term in the phrase. * * @param terms the terms in the phrase * @param searcher the document collection being searched * @return a score factor for the phrase */ public float idf(Collection terms, Searcher searcher) throws IOException { float idf = 0.0f; Iterator i = terms.iterator(); while (i.hasNext()) { idf += idf((Term)i.next(), searcher); } return idf; } /** Computes a score factor based on a term's document frequency (the number * of documents which contain the term). This value is multiplied by the * {@link #tf(int)} factor for each term in the query and these products are * then summed to form the initial score for a document. * *
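 *
 * <p>A sketch in the style of {@link DefaultSimilarity}, matching the idf(t)
 * formula in the class description:
 *
 * <pre>
 *   public float idf(int docFreq, int numDocs) {
 *     return (float)(Math.log(numDocs / (double)(docFreq + 1)) + 1.0);
 *   }
 * </pre>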

Terms that occur in fewer documents are better indicators of topic, so * implementations of this method usually return larger values for rare terms, * and smaller values for common terms. * * @param docFreq the number of documents which contain the term * @param numDocs the total number of documents in the collection * @return a score factor based on the term's document frequency */ public abstract float idf(int docFreq, int numDocs); /** Computes a score factor based on the fraction of all query terms that a * document contains. This value is multiplied into scores. * *
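 *
 * <p>A sketch in the style of {@link DefaultSimilarity}:
 *
 * <pre>
 *   public float coord(int overlap, int maxOverlap) {
 *     return overlap / (float)maxOverlap; // fraction of query terms matched
 *   }
 * </pre>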

The presence of a large portion of the query terms indicates a better * match with the query, so implementations of this method usually return * larger values when the ratio between these parameters is large and smaller * values when the ratio between them is small. * * @param overlap the number of query terms matched in the document * @param maxOverlap the total number of terms in the query * @return a score factor based on term overlap with the query */ public abstract float coord(int overlap, int maxOverlap); }