Below is the code of StandardTokenizer.jj (the leading license comments are omitted). It is written in JavaCC grammar syntax; once you have learned JavaCC, this code is straightforward to read, and much of the generated code in the standard package is produced from this file. From this file you can see how StandardTokenizer recognizes tokens — in particular, the tokenization StandardAnalyzer applies to CJK text is single-character segmentation.
options {
STATIC = false;                 // generate a non-static (re-entrant) tokenizer
//IGNORE_CASE = true;
//BUILD_PARSER = false;
UNICODE_INPUT = true;           // treat input as Unicode, not raw bytes
USER_CHAR_STREAM = true;        // use a user-supplied CharStream (FastCharStream)
OPTIMIZE_TOKEN_MANAGER = true;  // favor speed in the generated token manager
//DEBUG_TOKEN_MANAGER = true;
}
PARSER_BEGIN(StandardTokenizer)

package org.apache.lucene.analysis.standard;

/** A grammar-based tokenizer constructed with JavaCC.
 *
 * <p> This should be a good tokenizer for most European-language documents.
 *
 * <p>Many applications have specific tokenizer needs.  If this tokenizer does
 * not suit your application, please consider copying this source code
 * directory to your project and maintaining your own grammar-based tokenizer.
 */
public class StandardTokenizer extends org.apache.lucene.analysis.Tokenizer {

  /** Constructs a tokenizer for this Reader. */
  public StandardTokenizer(Reader reader) {
    // Wrap the Reader in a JavaCC-compatible CharStream (USER_CHAR_STREAM).
    this(new FastCharStream(reader));
    // Keep the raw Reader as well — presumably so Tokenizer.close() can
    // close it; confirm against org.apache.lucene.analysis.Tokenizer.
    this.input = reader;
  }
}

PARSER_END(StandardTokenizer)
/* Basic notation used in the grammar below:
 * |    alternation (match one of the alternatives)
 * +    one or more repetitions
 * *    zero or more repetitions
 * [ ]  character class: match any single character in the listed ranges
 * <>   reference to another named token definition
 */
TOKEN : {                                  // token patterns (regular expressions)

  // basic word: a sequence of digits & letters, e.g. 13j14234n4k32
  <ALPHANUM: (<LETTER>|<DIGIT>)+ >

  // internal apostrophes: O'Reilly, you're, O'Reilly's
  // use a post-filter to remove possessives
| <APOSTROPHE: <ALPHA> ("'" <ALPHA>)+ >

  // acronyms: U.S.A., I.B.M., etc.
  // use a post-filter to remove dots
| <ACRONYM: <ALPHA> "." (<ALPHA> ".")+ >

  // company names like AT&T and Excite@Home.
| <COMPANY: <ALPHA> ("&"|"@") <ALPHA> >

  // email addresses, e.g. [email protected]
| <EMAIL: <ALPHANUM> (("."|"-"|"_") <ALPHANUM>)* "@" <ALPHANUM> (("."|"-") <ALPHANUM>)+ >

  // hostnames, e.g. 202.113.9.183
| <HOST: <ALPHANUM> ("." <ALPHANUM>)+ >

  // floating point, serial, model numbers, ip addresses, etc.
  // every other segment must have at least one digit
| <NUM: (<ALPHANUM> <P> <HAS_DIGIT>
       | <HAS_DIGIT> <P> <ALPHANUM>
       | <ALPHANUM> (<P> <HAS_DIGIT> <P> <ALPHANUM>)+
       | <HAS_DIGIT> (<P> <ALPHANUM> <P> <HAS_DIGIT>)+
       | <ALPHANUM> <P> <HAS_DIGIT> (<P> <ALPHANUM> <P> <HAS_DIGIT>)+
       | <HAS_DIGIT> <P> <ALPHANUM> (<P> <HAS_DIGIT> <P> <ALPHANUM>)+
        )
  >

  // private (#) helper: punctuation allowed inside a <NUM>
| <#P: ("_"|"-"|"/"|"."|",") >

| <#HAS_DIGIT:                             // letters/digits containing at least one digit
    (<LETTER>|<DIGIT>)*
    <DIGIT>
    (<LETTER>|<DIGIT>)*
  >

| < #ALPHA: (<LETTER>)+>                   // a run of letters

| < #LETTER:                               // unicode letters (see the Unicode charts)
  [
    "\u0041"-"\u005a",                     // ASCII 65 (A) - 90 (Z)
    "\u0061"-"\u007a",                     // ASCII 97 (a) - 122 (z)
    "\u00c0"-"\u00d6",
    "\u00d8"-"\u00f6",
    "\u00f8"-"\u00ff",
    "\u0100"-"\u1fff"
  ]
  >

| < CJK:                                   // non-alphabetic CJK characters,
                                           // tokenized one character at a time
  [
    "\u3040"-"\u318f",                     // kana, bopomofo, Hangul compatibility jamo
    "\u3300"-"\u337f",                     // CJK compatibility
    "\u3400"-"\u3d2d",                     // CJK ideographs extension A (partial)
    "\u4e00"-"\u9fff",                     // CJK unified ideographs
    "\uf900"-"\ufaff"                      // CJK compatibility ideographs
  ]
  >

| < #DIGIT:                                // unicode digits
  [
    "\u0030"-"\u0039",
    "\u0660"-"\u0669",
    "\u06f0"-"\u06f9",
    "\u0966"-"\u096f",
    "\u09e6"-"\u09ef",
    "\u0a66"-"\u0a6f",
    "\u0ae6"-"\u0aef",
    "\u0b66"-"\u0b6f",
    "\u0be7"-"\u0bef",
    "\u0c66"-"\u0c6f",
    "\u0ce6"-"\u0cef",
    "\u0d66"-"\u0d6f",
    "\u0e50"-"\u0e59",
    "\u0ed0"-"\u0ed9",
    "\u1040"-"\u1049"
  ]
  >
}
SKIP : {                                   // skip unrecognized chars
  // ~[] matches any single character not matched by a TOKEN rule above;
  // such characters are silently discarded (whitespace, punctuation, etc.).
  <NOISE: ~[] >
}
/** Returns the next token in the stream, or null at EOS.
 * <p>The returned token's type is set to an element of {@link
 * StandardTokenizerConstants#tokenImage}.
 */
// Builds an org.apache.lucene.analysis.Token from the JavaCC-matched token.
org.apache.lucene.analysis.Token next() throws IOException :
{
  // holds the JavaCC token matched below
  Token token = null;
}
{
  ( token = <ALPHANUM> |
    token = <APOSTROPHE> |
    token = <ACRONYM> |
    token = <COMPANY> |
    token = <EMAIL> |
    token = <HOST> |
    token = <NUM> |
    token = <CJK> |
    token = <EOF>
   )
    {
      if (token.kind == EOF) {
        return null;                       // end of stream
      } else {
        // Wrap the matched text and its offsets in a Lucene Token. Note that
        // <CJK> matches a single character, so CJK text comes out one
        // character per token, while European-language words stay whole.
        return
          new org.apache.lucene.analysis.Token(token.image,
                                               token.beginColumn,token.endColumn,
                                               tokenImage[token.kind]);
      }
    }
}