import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.ChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.mira.lucene.analysis.MIK_CAnalyzer;

public class JeAnalyzer {

    public static void testStandard(String testString) {
        try {
            Analyzer analyzer = new StandardAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====standard analyzer====");
            Token t;
            // Lucene 2.4-era iteration: next() returns null when the stream is exhausted
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void testCJK(String testString) {
        try {
            Analyzer analyzer = new CJKAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====cjk analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void testChinese(String testString) {
        try {
            Analyzer analyzer = new ChineseAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            System.err.println("=====chinese analyzer====");
            Token t;
            while ((t = ts.next()) != null) {
                System.out.println(t.termText());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Segments testString with the JE (MIK_C) analyzer and joins the terms with commas.
    // The c1/c2 charset parameters are kept for the original call signature but are unused.
    public static String transJe(String testString, String c1, String c2) {
        String result = "";
        try {
            Analyzer analyzer = new MIK_CAnalyzer();
            Reader r = new StringReader(testString);
            TokenStream ts = analyzer.tokenStream("", r);
            Token t;
            while ((t = ts.next()) != null) {
                result += t.termText() + ",";
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    public static void main(String[] args) {
        try {
            String testString = "中文分词的方法其实不局限于中文应用,也被应用到英文处理,如手写识别,单词之间的空格就很清楚,中文分词方法可以帮助判别英文单词的边界";
            System.out.println("测试的语句 " + testString);
            String[] sResult = transJe(testString, "gb2312", "utf-8").split(",");
            for (int i = 0; i < sResult.length; i++) {
                System.out.println(sResult[i]);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Required jars:
lucene-analyzers-2.4.1.jar
lucene-core-2.4.1.jar
IKAnalyzer2.0.2OBF.jar
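Note that the Token.next()/termText() loop above only exists in these old 2.4-era jars; that API was deprecated and later removed. For comparison, here is a minimal sketch of the same loop against the attribute-based TokenStream API of modern Lucene (assuming Lucene 5.x or later on the classpath in place of the 2.4 jars listed above; the class name ModernTokenLoop and the choice of StandardAnalyzer are illustrative assumptions):

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ModernTokenLoop {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        // tokenStream() takes a field name and a Reader over the text
        try (TokenStream ts = analyzer.tokenStream("", new StringReader("中文分词方法"))) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();                    // must be called before incrementToken()
            while (ts.incrementToken()) {  // replaces the old Token.next() loop
                System.out.println(term.toString());
            }
            ts.end();
        }
    }
}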
The following program, test.py, segments the contents of a text file:
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import jieba  # import the jieba segmentation module

def splitSentence(inputFile, outputFile):
    fin = open(inputFile, 'r')    # open the input file for reading
    fout = open(outputFile, 'w')  # open the output file for writing
    for eachLine in fin:
        # strip leading/trailing whitespace and decode the byte string to
        # Unicode for processing (Python 2, hence the explicit decode/encode)
        line = eachLine.strip().decode('utf-8', 'ignore')
        # segment the line with jieba
        wordList = list(jieba.cut(line))
        outStr = ''
        for word in wordList:
            outStr += word
            outStr += '/ '
        # write the segmented result to the output file
        fout.write(outStr.strip().encode('utf-8') + '\n')
    fin.close()
    fout.close()

splitSentence('myInput.txt', 'myOutput.txt')
After saving the program, run python test.py from a Linux terminal to perform the segmentation (the script as written targets Python 2, since it decodes and encodes byte strings).
(Screenshots in the original post showed the input file contents and the segmented output.)
Note: jieba.cut() returns an iterable generator, not a list; use list(jieba.cut(...)) to convert it to a list, as the script above does.
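Because a generator is consumed lazily and only once, the difference is easy to see interactively. A minimal sketch (assuming jieba is installed, e.g. via pip install jieba):

import jieba

words = jieba.cut("中文分词方法")       # lazy: returns a generator object
print(words)                            # prints something like <generator object ...>
print(list(jieba.cut("中文分词方法")))  # materialize all tokens at once

# or consume the tokens one at a time:
for w in jieba.cut("中文分词方法"):
    print(w)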