-
Notifications
You must be signed in to change notification settings - Fork 24
/
Copy pathEmailScraping.py
74 lines (63 loc) · 1.85 KB
/
EmailScraping.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
#! python3
"""Scrape email addresses from a list of URLs and append them to emails.txt."""
import re, urllib.request, time

# Matches common email addresses, e.g. john.doe+tag@sub.example-site.com
# The redundant outer group is kept deliberately: findall() then returns
# tuples, and existing callers read the address as match[0].
emailRegex = re.compile(r'''
(
    ([a-zA-Z0-9._%+-]+      # local part: letters, digits, . _ % + -
    @
    [a-zA-Z0-9.-]+          # domain labels: letters, digits, dots, hyphens
    \.[a-zA-Z]{2,})         # require a TLD of at least two letters
)
''', re.VERBOSE)
#Extracting Emails
def extractEmailsFromUrlText(urlText):
    """Find all email addresses in *urlText*, print how many matches were
    found (duplicates included), and append each previously-unseen address
    to the open global ``emailFile`` handle.

    Relies on the module-level ``emailRegex`` pattern and ``emailFile``.
    Returns the list of unique emails written, in first-seen order
    (the original returned None; callers ignore the return value).
    """
    # findall() yields (outer, inner) group tuples; the address is element 0.
    allemails = [match[0] for match in emailRegex.findall(urlText)]
    print("\tNumber of Emails : %s\n" % len(allemails))
    seen = set()            # O(1) membership test instead of scanning a list
    unique = []
    for email in allemails:
        if email not in seen:
            seen.add(email)
            unique.append(email)
            emailFile.write(email + "\n")  # appending emails to the file
    return unique
#HtmlPage Read Func
def htmlPageRead(url, i):
    """Fetch *url*, print the fetch time, and hand the decoded HTML to
    extractEmailsFromUrlText().

    ``urllib.error.HTTPError`` is re-raised so the caller
    (emailsLeechFunc) can retry 404s through Google's web cache — the
    original bare ``except: pass`` swallowed it, making that fallback
    unreachable. Any other failure is ignored (best-effort scraping).
    """
    try:
        start = time.time()
        # Some sites reject the default urllib User-Agent, so present a browser one.
        headers = {'User-Agent': 'Mozilla/5.0'}
        request = urllib.request.Request(url, None, headers)
        # Context manager closes the connection even when decode/parse fails.
        with urllib.request.urlopen(request) as response:
            urlText = response.read().decode()
        print("%s.%s\tFetched in : %s" % (i, url, (time.time() - start)))
        extractEmailsFromUrlText(urlText)
    except urllib.error.HTTPError:
        # Let the caller apply its 404 cache fallback.
        raise
    except Exception:
        # Best-effort: skip URLs that fail for any other reason
        # (bad URL, connection error, undecodable body, ...).
        pass
#EmailsLeechFunction
def emailsLeechFunc(url, i):
    """Fetch *url* and harvest emails from it; on a 404, retry once
    through Google's web cache. All other HTTP errors are skipped
    silently, matching the script's best-effort behavior.
    """
    try:
        htmlPageRead(url, i)
    except urllib.error.HTTPError as err:
        if err.code == 404:
            # Page is gone — try Google's cached copy instead.
            cachedUrl = 'http://webcache.googleusercontent.com/search?q=cache:' + url
            try:
                htmlPageRead(cachedUrl, i)
            except Exception:
                # Narrowed from a bare except so Ctrl-C still interrupts;
                # the cache lookup failed too, give up on this URL.
                pass
        # Non-404 HTTP errors fall through and are ignored (original behavior).
# Main driver: read URLs from urls.txt, scrape each, append emails to emails.txt.
start = time.time()
i = 0
# Context managers guarantee both files are closed even if a fetch raises;
# emailFile stays a module-level global for extractEmailsFromUrlText().
with open("urls.txt", 'r') as urlFile, open("emails.txt", 'a') as emailFile:
    # Iterate the opened file to get one URL per line.
    for urlLink in urlFile:
        # Strip whitespace (including the trailing newline) BEFORE the quote
        # characters — the original strip('\'"') left the '\n' attached,
        # corrupting every request URL.
        urlLink = urlLink.strip().strip('\'"')
        if not urlLink:
            continue  # skip blank lines instead of fetching an empty URL
        i = i + 1
        emailsLeechFunc(urlLink, i)
print("Elapsed Time: %s" % (time.time() - start))