Java code for word-segmentation preprocessing: a Java tokenizer utility class

How do you segment a string in Java? If your segmentation rule is to add "_" to the beginning and end of a string and then split it into overlapping two-character pieces, the code can be written like this:
import java.util.ArrayList;
import java.util.List;

public class Participle {
    private static final String HEAD_END_STR = "_";
    private static final int PARTICIPLE_LENGTH = 2;

    public static void main(String[] args) {
        String exampleWord = "计算机";
        // Pad the string with "_" at both ends before splitting.
        exampleWord = HEAD_END_STR + exampleWord + HEAD_END_STR;
        int length = exampleWord.length();
        List<String> result = new ArrayList<String>();
        // Slide a two-character window across the padded string.
        for (int i = 0; i < length - 1; i++) {
            String str = exampleWord.substring(i, i + PARTICIPLE_LENGTH);
            result.add(str);
        }
        System.out.println(result);
    }
}
Output: [_计, 计算, 算机, 机_]
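The same rule can also be wrapped in a small reusable method instead of living in main. The following is only a sketch; the NgramUtil class and bigrams method names are placeholders, not anything from the original question:

import java.util.ArrayList;
import java.util.List;

public class NgramUtil {
    // Splits a padded string into overlapping two-character pieces,
    // following the same "_" padding rule as the example above.
    public static List<String> bigrams(String word) {
        String padded = "_" + word + "_";
        List<String> result = new ArrayList<String>();
        for (int i = 0; i < padded.length() - 1; i++) {
            result.add(padded.substring(i, i + 2));
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(bigrams("计算机")); // prints [_计, 计算, 算机, 机_]
    }
}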
Waiting online, this is fairly urgent! I wrote some code with the Java version of jieba (结巴分词); how do I write its output to a specified txt file?
String str = segmenter.sentenceProcess(s);
System.out.println(str);
BufferedWriter out = new BufferedWriter(new FileWriter("F:\\out.txt"));
out.append(str);
out.close(); // close (which also flushes) the writer, otherwise the buffered text may never reach the file
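If the file still comes out empty, a self-contained sketch like the one below may help. It assumes the com.huaban jieba-analysis library, whose JiebaSegmenter.sentenceProcess returns the tokens as a List<String>; the JiebaToFile class name, sample sentence, output path, and space-joined formatting are only illustrative:

import com.huaban.analysis.jieba.JiebaSegmenter;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;

public class JiebaToFile {
    public static void main(String[] args) throws IOException {
        JiebaSegmenter segmenter = new JiebaSegmenter();
        // sentenceProcess returns the segmented tokens as a list (assumes the jieba-analysis API).
        List<String> tokens = segmenter.sentenceProcess("这是一个测试句子");
        // try-with-resources closes (and therefore flushes) the writer automatically.
        try (BufferedWriter out = new BufferedWriter(new FileWriter("F:\\out.txt"))) {
            out.write(String.join(" ", tokens));
            out.newLine();
        }
    }
}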
Write a Chinese word-segmentation program in Java:
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.ChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.mira.lucene.analysis.MIK_CAnalyzer;

public class JeAnalyzer {
    // Tokenize with Lucene's StandardAnalyzer (splits CJK text into single-character tokens).
    public static void testStandard(String testString) {
        try {
            Analyzer analyzer = new StandardAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====standard analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Tokenize with CJKAnalyzer (overlapping two-character tokens).
    public static void testCJK(String testString) {
        try {
            Analyzer analyzer = new CJKAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====cjk analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Tokenize with the old ChineseAnalyzer (single-character tokens).
    public static void testChinese(String testString) {
        try {
            Analyzer analyzer = new ChineseAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====chinese analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Dictionary-based segmentation with the je-analysis MIK_CAnalyzer;
    // the c1/c2 charset parameters are kept from the original code but are not used here.
    public static String transJe(String testString, String c1, String c2) {
        String result = "";
        try {
            Analyzer analyzer = new MIK_CAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            Token t;
            while ((t = ts.next()) != null) {
                result += t.termText() + ",";
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    public static void main(String[] args) {
        try {
            String testString = "中文分词的方法其实不局限于中文应用,也被应用到英文处理,如手写识别,单词之间的空格就很清楚,中文分词方法可以帮助判别英文单词的边界";
            System.out.println("Test sentence: " + testString);
            String[] sResult = transJe(testString, "gb2312", "utf-8").split(",");
            for (int i = 0; i < sResult.length; i++) {
                System.out.println(sResult[i]);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
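Note that Token, termText() and TokenStream.next() only exist in very old Lucene releases. On recent Lucene versions tokens are read through attributes instead; the following is a minimal sketch of that pattern using StandardAnalyzer, with the empty field name and the class name chosen only for illustration:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ModernLuceneTokens {
    public static List<String> tokenize(String text) throws Exception {
        List<String> tokens = new ArrayList<String>();
        try (Analyzer analyzer = new StandardAnalyzer();
             TokenStream ts = analyzer.tokenStream("", text)) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();                      // must be called before incrementToken()
            while (ts.incrementToken()) {
                tokens.add(term.toString()); // text of the current token
            }
            ts.end();
        }
        return tokens;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(tokenize("中文分词的方法其实不局限于中文应用"));
    }
}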
Required JAR packages: the Lucene core and analyzer jars, plus the je-analysis jar that provides org.mira.lucene.analysis.MIK_CAnalyzer.
