#!/usr/bin/env python3
'''
pyCOMET_subtype.py (forked from asmmhossain/pyCOMET)

A Python 3 program that uses PPMD models for subtype classification.

The program takes the following as input:
    - a model description file (JSON format; the earlier msgpack loader is kept below, commented out)
    - a fixed context size

The models give log likelihoods of the probabilities of seeing a residue at
a particular position given its context; these likelihoods are then used to
predict the subtype.
'''

import math, os, sys, argparse, time
import umsgpack
import json

from Bio import SeqIO
import numpy as np
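
# Example invocation (a minimal sketch; the file names below are placeholders,
# not files shipped with the repository):
#
#   python3 pyCOMET_subtype.py -q queries.fasta -m ppmd_model.json -o predictions.tsv
#
# Optional flags (see getArguments() for the defaults): -c/--context 8,
# -w/--wSize 100, -b/--bSize 3, -t/--thr 28.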
##********************************************************##
def contextThreshold(x):
    '''Argparse type check: the value must be a positive integer.'''
    try:
        x = int(x)
    except ValueError as e:
        sys.exit(e)

    if x <= 0:
        raise argparse.ArgumentTypeError('%r must be a positive integer' % (x,))
    return x

#*******************************************************
#*******************************************************
def getNonRedundantListOrder(lst):
    '''Return the unique items of lst, preserving their first-seen order.'''
    seen = set()
    seen_add = seen.add
    return [x for x in lst if not (x in seen or seen_add(x))]

#*******************************************************
#*******************************************************
def pLike(c,ctx,cLen,sType,pScore=0.0):
    '''
    Returns the log10 likelihood of residue 'c' following context 'ctx'
    under the PPMD model of subtype 'sType', falling back from the longest
    matching context to shorter ones via escape probabilities (PPM exclusion).
    '''
    conList = [] # holds exclusion kmers
    kSize = cLen # current value for k

    for z in range(cLen+1):
        cCtx = ctx[z:]

        if cCtx not in ppmd_models[sType][kSize]:
            pass
        else: # k-mer present in the model
            # subtract counts of symbols already excluded at higher orders
            exclusion = 0
            for kmer in conList:
                if kmer in ppmd_models[sType][kSize][cCtx]:
                    exclusion += ppmd_models[sType][kSize][cCtx][kmer]

            sumCount = sum(ppmd_models[sType][kSize][cCtx].values())

            if c in ppmd_models[sType][kSize][cCtx]:
                try:
                    prob = float(ppmd_models[sType][kSize][cCtx][c]) / (sumCount - exclusion)
                    pScore += math.log10(prob)
                    #print(prob,pScore)
                    return pScore
                except (ZeroDivisionError, ValueError) as e:
                    sys.exit(e)
            else:
                # 'c' unseen at this order: add the escape probability and drop to a shorter context
                conList = list(ppmd_models[sType][kSize][cCtx].keys())
                conList.remove('esc')
                escape = ppmd_models[sType][kSize][cCtx]['esc']
                try:
                    prob = float(escape) / (sumCount - exclusion)
                    pScore += math.log10(prob)
                except (ZeroDivisionError, ValueError) as e:
                    sys.exit(e)

        kSize -= 1

    return pScore

#*******************************************************
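# Worked example of the fallback in pLike() (illustrative counts only, not taken
# from any real model file): suppose the order-2 context 'CA' stores
# {'A': 3, 'G': 1, 'esc': 2} and the next residue is 'T'. 'T' is unseen there,
# so the escape term log10(2/6) is added, {'A','G'} become the exclusion list,
# and the search drops to the order-1 context 'A'. If that context stores
# {'A': 2, 'T': 4, 'G': 1, 'esc': 3}, the excluded counts sum to 3, so the final
# contribution is log10(4/(10-3)) and the walk stops.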
#*******************************************************
def challenge(sLike,target,nSubtypes,start,end,thr):
    '''Computes the sum of likelihoods for each subtype in the given window
    and returns the most likely subtype for that window.
    '''
    # get the sum of likelihoods for each subtype
    #sumLL = [sum(sLike[i][start:end]) for i in range(nSubtypes)]
    sumLL = np.sum(sLike[:,start:end],axis=1)

    # find the maximum likelihood and its index
    #maxLL = max(sumLL)
    maxLL = np.amax(sumLL)
    #maxIndex = sumLL.index(maxLL)
    maxIndex = np.argmax(sumLL)

    # return the best matching subtype according to COMET's decision tree:
    # keep the target unless another subtype beats it by more than 'thr'
    if (maxLL - sumLL[target]) <= thr:
        return target
    else:
        return maxIndex

#*******************************************************
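# Example of the threshold rule above (illustrative numbers): with the default
# thr of 28, if the target subtype sums to -410.2 in a window and the best
# subtype sums to -395.0, the gap of 15.2 is within the threshold and the
# target is kept; a gap larger than 28 would reassign the window to the best
# scoring subtype.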
#****************************************************
def check_subtype(sLike,seqId,subtypes,nSubtypes,qLen,pIndex,args):
    '''
    Uses COMET's decision tree to call a subtype for a query
        sLike: likelihood matrix
        seqId: sequence identifier
        subtypes: list of subtype names
        nSubtypes: number of reference subtypes
        qLen: length of the query sequence
        pIndex: index of the first PURE subtype in 'subtypes'
        args: command line arguments
    '''
    thr = args.thr

    # get the sum of likelihoods for all subtypes
    #sumLL = [sum(sLike[i]) for i in range(nSubtypes)]
    sumLL = np.sum(sLike,axis=1)
    #print(sumLL)

    # find the index of the most likely subtype (PURE or CRF)
    #maxS = max(sumLL)
    maxS = np.amax(sumLL)
    #S = sumLL.index(maxS)
    S = np.argmax(sumLL)

    # find the most likely PURE subtype
    #maxPS = max(sumLL[pIndex:])
    maxPS = np.amax(sumLL[pIndex:])
    #PS = (sumLL[pIndex:].index(maxPS)) + pIndex
    PS = np.argmax(sumLL[pIndex:]) + pIndex

    #print(seqId, subtypes[S],maxS,subtypes[PS],maxPS)
    #with open(args.outFile,'a') as fh:
    #    fh.write('{}\t{}\t{}\t{}\t{}\n'.format(seqId, subtypes[S],maxS,subtypes[PS],maxPS))
    #return

    wSize = args.wSize # window size
    bSize = args.bSize # step size of the sliding window

    # pre-compute the number of windows
    numOfWindows = int((qLen-wSize)/bSize)+1
    #print('numOfWindows',numOfWindows)

    # check the PURE subtype first:
    # create a list of subtype assignments, one per window
    subAssignment = [PS]*numOfWindows
    iWindow = 0

    # get the most likely subtype for each window
    for i in range(0,numOfWindows*bSize,bSize):
        start = i
        end = i + wSize
        #print(iWindow)
        subAssignment[iWindow] = challenge(sLike,PS,nSubtypes,start,end,thr)
        iWindow += 1

    if S == PS:
        assignedSubtypes = getNonRedundantListOrder(subAssignment)
        if len(assignedSubtypes) == 1:
            msg = seqId + '\t' + subtypes[PS] + '\t(PURE)'
            #print(msg)
            return msg
        else:
            msg = seqId + '\t' + 'unassigned_1\t'
            for asub in assignedSubtypes:
                msg += subtypes[asub] + ' '
            #print(msg)
            return msg
    else: # S != PS
        assignedSubtypes = getNonRedundantListOrder(subAssignment)
        if len(assignedSubtypes) == 1:
            msg = seqId + '\t' + subtypes[PS] + '\t(Check ' + subtypes[S] + ')'
            #print(msg)
            return msg
        else: # the CRF needs checking as well
            subAssignment = [S]*numOfWindows
            iWindow = 0

            # get the most likely subtype for each window
            for i in range(0,numOfWindows*bSize,bSize):
                start = i
                end = i + wSize
                #print(iWindow)
                subAssignment[iWindow] = challenge(sLike,S,nSubtypes,start,end,thr)
                iWindow += 1

            assignedSubtypes = getNonRedundantListOrder(subAssignment)
            if len(assignedSubtypes) == 1:
                msg = seqId + '\t' + subtypes[S] + '\t(CRF)'
                #print(msg)
                return msg
            else:
                msg = seqId + '\t' + 'unassigned_2\t'
                for asub in assignedSubtypes:
                    msg += subtypes[asub] + ' '
                #print(msg)
                return msg

#****************************************************
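# The strings returned above become the rows of the output file, e.g.
# (hypothetical sequence IDs and subtype names):
#   query_001    B              (PURE)
#   query_002    02_AG          (CRF)
#   query_003    unassigned_1   A1 B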
#****************************************************
def calculateLogLikelihood(query,subtypes,nSubtypes,pIndex,args):
    '''
    Calculates likelihood values for a query sequence and returns the
    subtype call produced by check_subtype().
    '''
    # convert the sequence into upper case and remove gaps
    qSeq = str(query.seq).upper().replace('-','')
    qLen = len(qSeq)

    # create a matrix to hold likelihoods for all the nucleotide positions;
    # each row holds the likelihoods generated by one subtype PPMD model
    #lMatrix = [[]]*nSubtypes
    lMatrix = np.zeros((nSubtypes,qLen))

    # get the context size
    cSize = int(args.context)

    # generate likelihoods based on each subtype model
    for r in range(nSubtypes):
        # 'bits' holds likelihood values for all the nucleotide positions
        #bits = [0.0]*qLen
        bits = np.zeros(qLen)

        # get the likelihood for each residue position
        for j in range(qLen):
            # the nucleotide at position 'j'
            c = qSeq[j]
            # do not calculate for ambiguous characters
            #if c not in dna:
            #    continue

            # extract the current context
            start = j - cSize if j > cSize else 0
            ctx = qSeq[start:j]
            lctx = j - start

            # calculate the likelihood only when the context is unambiguous 'ACGT'
            if set(ctx).issubset('ACGT'):
                lPos = pLike(c,ctx,lctx,subtypes[r])
                bits[j] = lPos

        # store the per-position likelihoods for this reference subtype
        lMatrix[r] = bits

    return check_subtype(lMatrix,query.id,subtypes,nSubtypes,qLen,pIndex,args)

#****************************************************
##**************************************************
def getArguments():
    '''
    Parse all the command line arguments from the user
    '''
    parser = argparse.ArgumentParser(description='Predicts subtype of a sequence based on PPMD models trained using reference sequences', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-q','--query',required=True,help='Query sequence file in FASTA format for subtype classification')
    parser.add_argument('-c','--context',type=contextThreshold,default=8,help='Context size for PPMD model (default: 8)')
    parser.add_argument('-w','--wSize',type=contextThreshold,default=100,help='Window size for COMET decision tree (default: 100)')
    parser.add_argument('-b','--bSize',type=contextThreshold,default=3,help='Step size of the windows for COMET decision tree (default: 3)')
    parser.add_argument('-m','--modelFile',required=True,help='PPMD model file for reference sequences')
    parser.add_argument('-o','--outFile',required=True,help='Output file for subtype prediction results')
    parser.add_argument('-t','--thr',type=contextThreshold,default=28,help='Threshold difference used in decision tree (default: 28)')

    args = parser.parse_args()

    return args

##********************************************************##
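# Expected shape of the loaded model, as inferred from the lookups in pLike()
# (a sketch; the actual files come from the pyCOMET training step, and the
# subtype names and counts below are only examples):
#
#   { "A1": [ { "": {...order-0 symbol/esc counts...} },
#             ...,
#             { "ACGTACGT": {"A": 12, "C": 3, "esc": 2} } ],
#     "B":  [ ... ],
#     ... }
#
# i.e. subtype -> list indexed by context length -> context string -> counts,
# with an 'esc' entry per context holding the escape count.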
#***********************************************************
if __name__=="__main__":
    args = getArguments()

    ## get the context size
    cSize = int(args.context)

    ## Create dna alphabet
    #dna = ['A','C','G','T']

    ## define the digits list
    #digits = list('0123456789')

    ## read in the model file and populate the ppmd_models dictionary
    #fh = open(args.modelFile,'rb')
    #ppmd_models = umsgpack.load(fh)

    try:
        with open(args.modelFile,'r') as fh:
            ppmd_models = json.load(fh)
    except FileNotFoundError:
        eMsg = '\nThe pyCOMET model file <{}> could not be found.'.format(args.modelFile)
        eMsg += ' Please try again with the correct model file name.\n'
        sys.exit(eMsg)

    ## get the names of the subtypes present in the loaded PPMD models
    loaded_subtypes = sorted(list(ppmd_models.keys()))

    ## remove 'CPZ' from the subtype list if present
    try:
        loaded_subtypes.remove('CPZ')
    except ValueError:
        pass

    #print(ppmd_models['A1'][8])
    #sys.exit(0)

    nSubtypes = len(loaded_subtypes)

    # get the index of 'A1'; this marks the start index of the PURE subtypes
    pIndex = loaded_subtypes.index('A1')

    ## read in the query sequences
    try:
        qSeqs = list(SeqIO.parse(args.query,'fasta'))
    except FileNotFoundError:
        eMsg = '\nThe query sequence file <{}> could not be found.'.format(args.query)
        eMsg += ' Please try again with the correct sequence file name.\n'
        sys.exit(eMsg)

    ## check that the sequences were read properly
    if len(qSeqs) == 0:
        msg = 'Query sequences were not read properly.'
        msg += '\nPlease run again with a valid FASTA file containing at least one sequence.\n'
        sys.exit(msg)

    ## open output file for storing predicted subtypes
    #fh = open(args.outFile,'w')

    # call calculateLogLikelihood() to generate a subtype call for each query
    for query in qSeqs:
        #print(query.id,len(query.seq))
        # calculate the likelihood matrix and the resulting subtype call
        tMsg = calculateLogLikelihood(query,loaded_subtypes,nSubtypes,pIndex,args)
        print('{}'.format(tMsg))
        with open(args.outFile,'a') as fh:
            fh.write('{}\n'.format(tMsg))

    #fh.close()
#***********************************************************