#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
#=============================================================================
# FileName: main.py
# Tips: make sure redis-server is already running before starting this program
# Author: coolws
# Email: [email protected]
#=============================================================================
'''
from BeautifulSoup import BeautifulSoup
from conf import *
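# conf.py is expected to define the settings referenced below (an assumption based on
# the names this file uses): REDIS_IP, REDIS_PORT, HTTP_QUERYS, headers, timezone,
# INTERVAL_DAYS, KEY_WORDS, FILETER_WORDS, SEND_EMAIL, RECEIVE_MAIL_LIST,
# SEND_MAIL_HOST, SEND_MAIL_USER, SEND_MAIL_PASSWORD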
import smtplib
from email.mime.text import MIMEText
import re
import redis
import requests
import datetime
from apscheduler.scheduler import Scheduler
class Crawler:
    def __init__(self):
        self.rs = redis.Redis(host=REDIS_IP, port=REDIS_PORT)
        self.http_querys = HTTP_QUERYS
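        # Each entry of HTTP_QUERYS is assumed to be a dict with the four keys that
        # _parseHtmlToUrls reads; the values below are purely illustrative:
        #   {'host':   'http://job.example.edu.cn',          # site root, prepended to relative links
        #    'url':    'http://job.example.edu.cn/list.htm', # listing page to fetch
        #    'href':   r'/content/\d+',                      # regex that posting links must match
        #    'source': u'example'}                           # label appended to each title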
    # Extract the job-posting links from the target HTML page
    def _parseHtmlToUrls(self, **http_query):
        flag = True
        mode = 0
        host = http_query['host']
        url = http_query['url']
        href = http_query['href']
        source = http_query['source']
        r = requests.get(url, headers=headers)
        r.encoding = 'GBK'
        #print "********"
        #print r.text
        #print "********"
        soup = BeautifulSoup(r.text)
        attrs = {
            'href' : re.compile(href),
            'title' : None,
        }
        results = soup.findAll('a', attrs=attrs)
        urls = []
        for res in results:
            if res.parent.parent.get('class') != 'top':
                res['href'] = host + res['href']
                #print res['href']
                if res.string == None:
                    continue
                else:
                    # append the source label (u"来源" = "source") to the title
                    res.string += u" 来源:" + source
                if res.parent.previousSibling.string == None:
                    mode = 1
                    time = res.parent.nextSibling.string # seu
                    #print "previousSibling"+time
                    if time.find('-') < 0:
                        times = time.split('&')[0].split(':')
                        urls.append(res)
                    else:
                        times = time.split("-")
                        if Crawler.isWithinDays(times[0], times[1], times[2]):
                            urls.append(res)
                        else:
                            flag = False
                else:
                    mode = 2
                    time = res.parent.previousSibling.string #sjtu
                    match = re.compile('\s+')
                    times = match.split(time)
                    #print times
                    monthIndex = Crawler.isMonth(times[0])
                    if Crawler.isWithinDays(2015, monthIndex, times[1]): # TODO: the year is hard-coded and should be taken from the page
                        urls.append(res)
                    else:
                        flag = False
        if mode == 1 and flag == True:
            attrs = {
                'class' : "page-select",
                'target' : None,
            }
            results = soup.findAll('li', attrs=attrs)
            nextUrl = {}
            nextUrl['host'] = host
            nextUrl['url'] = host + results[0].nextSibling.next['href']
            nextUrl['href'] = href
            nextUrl['source'] = source
            #print "@@@@@@@1@@@@@@@@@@@@@"
            self.http_querys.append(nextUrl)
        if mode == 2 and flag == True:
            results = soup.findAll(text=u"上一页")  # u"上一页" = the "previous page" link text
            nextUrl = {}
            nextUrl['host'] = host
            nextUrl['url'] = host + results[0].parent['href']
            nextUrl['href'] = href
            nextUrl['source'] = source
            #print "@@@@@@@2@@@@@@@@@@@@@"
            self.http_querys.append(nextUrl)
        return urls
    # Check whether the string contains any of the flagged keywords
    @staticmethod
    def isContainElements(s, tup):
        if filter(lambda x: x in s, tup):
            return True
        return False
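    # Illustrative behaviour (hypothetical values): with tup = (u"实习", u"算法"),
    # isContainElements(u"2015春季实习生招聘", tup) returns True, while a title that
    # contains none of the words returns False.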
    # Map a month label (as it appears on the page) to its index in the
    # `timezone` sequence defined in conf
    @staticmethod
    def isMonth(label):
        return timezone.index(label)
    # Check whether the given date falls within the last INTERVAL_DAYS days
    @staticmethod
    def isWithinDays(year, month, day):
        today = datetime.date.today()
        day = datetime.date(int(year), int(month), int(day))
        delta = today - day
        days = delta.days
        if days <= INTERVAL_DAYS:
            return True
        else:
            return False
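    # Illustrative example (assuming INTERVAL_DAYS = 2 in conf.py and today is
    # 2015-03-10): isWithinDays(2015, 3, 9) -> True, isWithinDays(2015, 3, 1) -> False.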
    # Filter a fetched entry against the keyword lists and save it to redis
    def _putMessageUrlIntoRedis(self, url):
        title = url.string
        # strip the appended u"来源" (source) label before keyword matching
        title_remove_source = title.rsplit(u'来源')[0]
        #print "###"+title
        if FILETER_WORDS == None:
            if KEY_WORDS == None:
                self.rs.sadd('message_urls', url)
                #print title_remove_source
            else:
                if Crawler.isContainElements(title_remove_source, KEY_WORDS):
                    self.rs.sadd('message_urls', url)
                    #print title_remove_source
        else:
            if not Crawler.isContainElements(title_remove_source, FILETER_WORDS):
                if KEY_WORDS == None:
                    self.rs.sadd('message_urls', url)
                    #print title_remove_source
                else:
                    if Crawler.isContainElements(title_remove_source, KEY_WORDS):
                        self.rs.sadd('message_urls', url)
                        #print title_remove_source
    # Save all entries of interest to redis
    def _putUrlsIntoRedis(self, urls):
        for url in urls:
            self._putMessageUrlIntoRedis(url)
    # Read the collected entries back from redis and join them into an HTML body
    def _getMessageUrlsFromRedis(self):
        ret = self.rs.smembers('message_urls')
        urls = ""
        for href in ret:
            urls += href + "<br>"
        return len(ret), urls
    # Send the collected job postings by email
    def sendMessage(self):
        msg_num, content = self._getMessageUrlsFromRedis()
        if msg_num <= 0:
            print "no messages to send..."
            return
        sub = "[找工作,找实习] 共有%d条信息" % msg_num  # subject: "[job & internship hunting] %d postings in total"
        #print sub
        msg = MIMEText(content, 'html', 'utf-8')
        msg["Accept-Language"] = "zh-CN"
        msg["Accept-Charset"] = "ISO-8859-1, utf-8"
        msg['Subject'] = sub
        msg['From'] = SEND_EMAIL
        msg['To'] = ",".join(RECEIVE_MAIL_LIST)
        try:
            smtp = smtplib.SMTP()
            smtp.connect(SEND_MAIL_HOST)
            smtp.starttls()
            smtp.login(SEND_MAIL_USER, SEND_MAIL_PASSWORD)
            smtp.sendmail(SEND_EMAIL, RECEIVE_MAIL_LIST, msg.as_string())
            print "message sent successfully..."
        except Exception, e:
            print "failed to send message: " + str(e)
        finally:
            smtp.quit()
    def run(self):
        print "crawler is going to start..."
        print "it may take several seconds, please wait..."
        self.rs.delete('message_urls')
        for http_query in self.http_querys:
            urls = self._parseHtmlToUrls(**http_query)
            self._putUrlsIntoRedis(urls)
        print "crawler has finished..."
if __name__ == '__main__':
    crawler = Crawler()
    crawler.run()
    crawler.sendMessage()
    # Set the interval for crawling and mailing job postings (in hours)
    sched = Scheduler()
    sched.daemonic = False
    sched.add_interval_job(crawler.run, hours=48)
    sched.add_interval_job(crawler.sendMessage, hours=48)
    sched.start()