导入SqlServer JDBC的驱动，SQLServer的JDBC URL格式为：
jdbc:sqlserver://172.30.202.21:1433;DatabaseName=AirAutoMonitor
3. 获得连接的代码
public static Connection getConnection(String url, String username, String password)throws ResourceDirectoryException {
Connection conn = null
String driverName = ""
Properties props = new Properties()
props.put("user", username)
props.put("password", password)
if (url != null || !"".equals(url)) {
if (url.indexOf("oracle") > -1) {
databaseType = "oracle"
props.put("remarksReporting", "true")
driverName = "oracle.jdbc.driver.OracleDriver"
}
if (url.indexOf("sqlserver") > -1) {
databaseType = "sqlserver"
driverName = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
}
if (url.indexOf("mysql") > -1) {
databaseType = "mysql"
driverName = "com.mysql.jdbc.Driver"
}
}
try {
Class.forName(driverName)
conn = DriverManager.getConnection(url, props)
} catch (ClassNotFoundException e) {
throw new ResourceDirectoryException(e)
} catch (SQLException e) {
throw new ResourceDirectoryException(e)
}
return conn
}
上面的代码是获得Oracle, MySQL, SqlServer的数据库连接的通用方法。
1.为了能够将爬取到的数据存入本地数据库,现在本地创建一个mysql数据库example,然后在数据库中建立一张表格test,示例如下:
view sourceprint?
01.<code class="hljs sql">DROP TABLE IF EXISTS `test`
02.CREATE TABLE `douban_db` (
03.`id` int(11) NOT NULL AUTO_INCREMENT,
04.`url` varchar(20) NOT NULL,
05.`direct` varchar(30),
06.`performer` date,
07.`type` varchar(30),
08.`district` varchar(20) NOT NULL,
09.`language` varchar(30),
10.`date` varchar(30),
11.`time` varchar(30),
12.`alias` varchar(20) NOT NULL,
13.`score` varchar(30),
14.`comments` varchar(300),
15.`scenario` varchar(300),
16.`IMDb` varchar(30),
17.PRIMARY KEY (`id`)
18.) ENGINE=MyISAM DEFAULT CHARSET=utf8</code>
2.如果使用开源框架pyspider来进行爬虫的话,默认情况下,会把爬取到的结果存放到result.db这个sqilite数据库中,但是为了方便 *** 作,我们将结果存放到mysql中。接下
来要做的一个 *** 作就是重写on_result方法,实例化调用我们自己实现的SQL方法,具体
实例如下:
view sourceprint?
01.<code class="hljs sql"><code class="hljs python">#!/usr/bin/env python
02.# -*- encoding: utf-8 -*-
03.# Created on 2015-03-20 09:46:20
04.# Project: fly_spider
05.
06.import re
07.from pyspider.database.mysql.mysqldb import SQL
08.from pyspider.libs.base_handler import *
09.
10.
11.class Handler(BaseHandler):
12.
13.headers= {
14.'Accept':'text/html,application/xhtml+xml,application/xmlq=0.9,image/webp,*/*q=0.8',
15.'Accept-Encoding':'gzip, deflate, sdch',
16.'Accept-Language':'zh-CN,zhq=0.8',
17.'Cache-Control':'max-age=0',
18.'Connection':'keep-alive',
19.'User-Agent':'Mozilla/5.0 (Windows NT 6.1WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36'
20.}
21.
22.crawl_config = {
23.'headers' : headers,
24.'timeout' : 100
25.}
26.@every(minutes=24 * 60)
27.def on_start(self):
28.self.crawl('http://movie.douban.com/tag/', callback=self.index_page)
29.
30.@config(age=10 * 24 * 60 * 60)
31.def index_page(self, response):
32.for each in response.doc('a[href^='http']').items():
33.if re.match('http://movie.douban.com/tag/w+', each.attr.href, re.U):
34.self.crawl(each.attr.href, callback=self.list_page)
35.@config(age=10*24*60*60, priority=2)
36.def list_page(self, response):
37.for each in response.doc('html >body >div#wrapper >div#content >div.grid-16-8.clearfix >div.article >div >table tr.item >td >div.pl2 >a').items():
38.self.crawl(each.attr.href, priority=9, callback=self.detail_page)
39.
40.@config(priority=3)
41.def detail_page(self, response):
42.return {
43.'url': response.url,
44.'title': response.doc('html >body >#wrapper >#content >h3 >span').text(),
45.'direct': ','.join(x.text() for x in response.doc('a[rel='v:directedBy']').items()),
46.'performer': ','.join(x.text() for x in response.doc('a[rel='v:starring']').items()),
47.'type': ','.join(x.text() for x in response.doc('span[property='v:genre']').items()),
48.#'district': ''.join(x.text() for x in response.doc('a[rel='v:starring']').items()),
49.#'language': ''.join(x.text() for x in response.doc('a[rel='v:starring']').items()),
50.'date': ','.join(x.text() for x in response.doc('span[property='v:initialReleaseDate']').items()),
51.'time': ','.join(x.text() for x in response.doc('span[property='v:runtime']').items()),
52.#'alias': ''.join(x.text() for x in response.doc('a[rel='v:starring']').items()),
53.'score': response.doc('.rating_num').text(),
54.'comments': response.doc('html >body >div#wrapper >div#content >div.grid-16-8.clearfix >div.article >div#comments-section >div.mod-hd >h4 >i').text(),
55.'scenario': response.doc('html >body >div#wrapper >div#content >div.grid-16-8.clearfix >div.article >div.related-info >div#link-report.indent').text(),
56.'IMDb': ''.join(x.text() for x in response.doc('span[href]').items()),
57.}
58.
59.def on_result(self, result):
60.if not result or not result['title']:
61.return
62.sql = SQL()
63.sql.replace('douban_db',**result)
64.</code></code>
关于上面这段代码,有下面几点需要说明的:
a. 为了避免服务器判断出客户端在进行爬虫操作，从而禁止ip访问(具体表现为出现403禁止访问)，我们需要在发出请求的时候加上一个http头，伪装成使用浏览器访问，具体用法如下：
view sourceprint?
01.<code class="hljs sql"><code class="hljs python"><code class="hljs bash"> headers= {
02.'Accept':'text/html,application/xhtml+xml,application/xmlq=0.9,image/webp,*/*q=0.8',
03.'Accept-Encoding':'gzip, deflate, sdch',
04.'Accept-Language':'zh-CN,zhq=0.8',
05.'Cache-Control':'max-age=0',
06.'Connection':'keep-alive',
07.'User-Agent':'Mozilla/5.0 (Windows NT 6.1WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36'
08.}
09.
10.crawl_config = {
11.'headers' : headers,
12.'timeout' : 100
13.}</code></code></code>
b. @every(minutes=24 * 60)表示每天执行一次
@config(age=10 * 24 * 60 * 60)表示数据10天后就过期了
c. 接下来是一个比较重要的地方,重写on_result方法,相当于实现了一个多态,程序在最后返回时,会执行on_result方法,默认的情况下,on_result是将数据刷入sqlite中,但是如果我们需要将数据插入mysql中,就需要重写on_result方法,具体使用如下:
view sourceprint?
1.<code class="hljs sql"><code class="hljs python"><code class="hljs bash"><code class="hljs python">
2.def on_result(self, result):
3.if not result or not result['title']:
4.return
5.sql = SQL()
6.sql.replace('test',**result)
7.</code></code></code></code>
注意这里的if not result or not result['title']:这句判断很重要，不然的话会报错，提示result是未定义类型的。
3.在上面的代码中，提到了实例化调用我们自己实现的SQL方法，并且引用了from pyspider.database.mysql.mysqldb import SQL这个库文件，那么就必须在这个目录下实现这个库，具体如下：
把下面内容保存到pyspider/pyspider/database/mysql/目录下，命名为mysqldb.py
view sourceprint?
01.<code class="hljs sql"><code class="hljs python"><code class="hljs bash"><code class="hljs python"><code class="hljs python">from six import itervalues
02.import mysql.connector
03.from datetime import date, datetime, timedelta
04.
05.class SQL:
06.
07.username = 'root' #数据库用户名
08.pass<a href="http://www.it165.net/edu/ebg/" target="_blank" class="keylink">word</a>= 'root' #数据库密码
09.database = 'test' #数据库
10.host = '172.30.25.231' #数据库主机地址
11.connection = ''
12.connect = True
13.placeholder = '%s'
14.
15.def __init__(self):
16.if self.connect:
17.SQL.connect(self)
18.def escape(self,string):
19.return '`%s`' % string
20.def connect(self):
21.config = {
22.'user':SQL.username,
23.'pass<a href="http://www.it165.net/edu/ebg/" target="_blank" class="keylink">word</a>':SQL.password,
24.'host':SQL.host
25.}
26.if SQL.database != None:
27.config['database'] = SQL.database
28.
29.try:
30.cnx = mysql.connector.connect(**config)
31.SQL.connection = cnx
32.return True
33.except mysql.connector.Error as err:
34.
35.if (err.errno == errorcode.ER_ACCESS_DENIED_ERROR):
36.print 'The credentials you provided are not correct.'
37.elif (err.errno == errorcode.ER_BAD_DB_ERROR):
38.print 'The database you provided does not exist.'
39.else:
40.print 'Something went wrong: ' , err
41.return False
42.
43.
44.def replace(self,tablename=None,**values):
45.if SQL.connection == '':
46.print 'Please connect first'
47.return False
48.
49.tablename = self.escape(tablename )
50.if values:
51._keys = ', '.join(self.escape(k) for k in values)
52._values = ', '.join([self.placeholder, ] * len(values))
53.sql_query = 'REPLACE INTO %s (%s) VALUES (%s)' % (tablename, _keys, _values)
54.else:
55.sql_query = 'REPLACE INTO %s DEFAULT VALUES' % tablename
56.
57.
58.cur = SQL.connection.cursor()
59.try:
60.if values:
61.cur.execute(sql_query, list(itervalues(values)))
62.else:
63.cur.execute(sql_query)
64.SQL.connection.commit()
65.return True
66.except mysql.connector.Error as err:
67.print ('An error occured: {}'.format(err))
68.return False
69.</code></code></code></code></code>
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)