#
source:
orange-bioinformatics/_bioinformatics/obiAssess.py
@
1728:a7d42a8d1ed2

Revision 1728:a7d42a8d1ed2, 21.7 KB checked in by markotoplak, 14 months ago (diff) |
---|

Rev | Line | |
---|---|---|

[844] | 1 | """ |

2 | Construction of gene set scores for each sample. | |

3 | ||

4 | Learners in this module build models needed for construction | |

5 | of features from individual genes. | |

6 | ||

7 | The other classes just take example and return a | |

8 | dictionary of { name: score } for that example. | |

9 | """ | |

10 | ||

[1632] | 11 | from __future__ import absolute_import |

12 | ||

13 | from collections import defaultdict | |

14 | import math | |

15 | ||

[485] | 16 | import numpy |

[1632] | 17 | |

18 | import orange, Orange, statc | |

19 | ||

20 | from . import obiExpression, obiGene, obiGsea, obiGeneSets, stats | |

[485] | 21 | |

def normcdf(x, mi, st):
    """
    Cumulative distribution function of a normal distribution with
    mean `mi` and standard deviation `st`, evaluated at `x`.

    Uses the stdlib complementary error function (math.erfc, Python 2.7+)
    instead of the pure-python approximation `stats.erfcc`.
    """
    return 0.5 * (2. - math.erfc((x - mi) / (st * math.sqrt(2))))

24 | ||

class AT_edelmanParametric(object):
    """
    Scores a single attribute value as the log-ratio of tail probabilities
    under two per-class normal distributions (Edelman et al., 2006).

    Expects keyword attributes mi1, st1 (mean/std for the first class) and
    mi2, st2 (for the second class), as fitted by AT_edelmanParametricLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, nval):
        # Without both fitted distributions the score is undefined; stay neutral.
        if self.mi1 is None or self.mi2 is None \
            or self.st1 is None or self.st2 is None:
            return 0

        # Accept either an Orange Value (with .value/.isSpecial()) or a
        # plain number.
        try:
            val = nval.value
            if nval.isSpecial():
                return 0
        except AttributeError:
            val = nval

        try:
            # Tail probability of val under each class distribution.
            if val >= self.mi1:
                p1 = 1 - normcdf(val, self.mi1, self.st1)
            else:
                p1 = normcdf(val, self.mi1, self.st1)

            if val >= self.mi2:
                p2 = 1 - normcdf(val, self.mi2, self.st2)
            else:
                p2 = normcdf(val, self.mi2, self.st2)

            return math.log(p1 / p2)
        except (ZeroDivisionError, ValueError, OverflowError):
            # p2 == 0, log of 0, or a degenerate distribution; stay neutral.
            return 0

59 | ||

class AT_edelmanParametricLearner(object):
    """
    Builds an AT_edelmanParametric transformer for a given attribute in
    the dataset.  Edelman et al., 2006 (modified a bit).
    """

    def __init__(self, a=None, b=None):
        """
        a and b are the chosen class values; when omitted, the first two
        values of the class variable are used.
        """
        self.a = a
        self.b = b

    def __call__(self, i, data):
        cv = data.domain.classVar

        # NOTE(review): the original assigned the defaults back onto self,
        # so the first dataset's class values leaked into later calls.
        # Use locals instead and keep the learner's configuration intact.
        a = self.a if self.a is not None else cv.values[0]
        b = self.b if self.b is not None else cv.values[1]

        def values_with_class(value):
            # Attribute values of examples of the given class, skipping unknowns.
            return [ex[i].value for ex in data
                    if ex[-1].value == value and not ex[i].isSpecial()]

        list1 = values_with_class(a)
        list2 = values_with_class(b)

        mi1 = mi2 = st1 = st2 = None

        # statc raises on empty lists; leave the statistics as None so the
        # transformer returns a neutral score.
        try:
            mi1 = statc.mean(list1)
            st1 = statc.std(list1)
        except Exception:
            pass

        try:
            mi2 = statc.mean(list2)
            st2 = statc.std(list2)
        except Exception:
            pass

        return AT_edelmanParametric(mi1=mi1, mi2=mi2, st1=st1, st2=st2)

104 | ||

class AT_loess(object):
    """
    Scores a single attribute value as the log-ratio of loess-estimated
    class probabilities.  Expects a `condprob` keyword attribute (a
    conditional probability estimator, or None), as built by AT_loessLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, nval):
        val = nval.value
        if nval.isSpecial():
            return 0.0  # middle (neutral) value for unknowns

        def saveplog(a, b):
            # log(a/b) that saturates at +/-10 instead of raising on
            # zero probabilities.
            try:
                return math.log(a / b)
            except (ZeroDivisionError, ValueError, OverflowError):
                if a < b:
                    return -10
                else:
                    return +10

        try:
            ocene = self.condprob(val)
            if sum(ocene) < 0.01:
                # Estimates too small to be trusted; neutral score.
                return 0.0
            return saveplog(ocene[0], ocene[1])
        except Exception:
            # condprob may be None or fail outside its fitted range.
            return 0.0

135 | ||

class AT_loessLearner(object):
    """
    Builds an AT_loess transformer with a loess conditional probability
    estimator for attribute `i` of `data`.
    """

    def __call__(self, i, data):
        try:
            ca = orange.ContingencyAttrClass(data.domain.attributes[i], data)
            estimator = orange.ConditionalProbabilityEstimatorConstructor_loess(ca, nPoints=5)
            return AT_loess(condprob=estimator)
        except Exception:
            # Estimation can fail (e.g. too few distinct values); the
            # resulting transformer then always returns a neutral score.
            return AT_loess(condprob=None)

147 | ||

def nth(l, n):
    """Return the n-th component of every element of `l`."""
    return [element[n] for element in l]

150 | ||

class Assess(object):
    """
    Computes GSEA-style enrichment scores of one example for every gene set.

    Expects keyword attributes `attrans` (per-attribute transformers) and
    `gsetsnum` (gene sets as attribute-index lists), as built by
    AssessLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        # Transform each attribute value into its per-gene score.
        lcor = [trans(example[idx]) for idx, trans in enumerate(self.attrans)]

        # Rank the genes by the transformed scores.
        ordered = obiGsea.orderedPointersCorr(lcor)
        rev2 = numpy.argsort(ordered)

        # Enrichment score of each gene set on the ranked gene list.
        return dict(
            (name, obiGsea.enrichmentScoreRanked(subset, lcor, ordered, rev2=rev2)[0])
            for name, subset in self.gsetsnum.items())

[485] | 176 | |

[844] | 177 | |

class AssessLearner(object):
    """
    Uses the underlying GSEA code to select genes.
    Takes data and creates attribute transformations, returning an
    Assess scorer.
    """

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None, rankingf=None):
        data, oknames, gsetsnum = selectGenesetsData(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart, classValues=classValues)

        if rankingf is None:
            rankingf = AT_edelmanParametricLearner()

        # rank individual attributes on the training set
        attrans = [rankingf(iat, data) for iat, at in enumerate(data.domain.attributes)]

        return Assess(attrans=attrans, gsetsnum=gsetsnum)

[485] | 195 | |

def selectGenesetsData(data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None):
    """
    Returns the (possibly filtered) data together with the gene sets
    satisfying the given size/coverage criteria, both as a list of names
    and as a {name: attribute-index list} mapping.
    """
    gsea = obiGsea.GSEA(data, matcher=matcher, classValues=classValues, atLeast=0)
    gsea.addGenesets(geneSets)
    okgenesets = gsea.selectGenesets(
        minSize=minSize, maxSize=maxSize, minPart=minPart).keys()
    gsetsnum = gsea.to_gsetsnum(okgenesets)
    return gsea.data, okgenesets, gsetsnum

[844] | 205 | |

def corgs_activity_score(ex, corg):
    """ activity score for a sample for pathway given by corgs """
    # Unknown ('?') values contribute 0.0 -- FIXME what to do with unknown values?
    total = 0.0
    for gene_index in corg:
        value = ex[gene_index].value
        if value != '?':
            total += value
    return total / len(corg) ** 0.5

210 | ||

class CORGs(object):
    """
    Maps an example to {geneset_name: CORG activity score}.

    Expects a `corgs` keyword attribute ({name: list of gene indices}),
    as built by CORGsLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        scores = {}
        for name, corg in self.corgs.items():
            scores[name] = corgs_activity_score(example, corg)
        return scores

220 | ||

class CORGsLearner(object):
    """
    Learns a CORG (condition-responsive gene subset) for each gene set
    and returns a CORGs scorer.
    """

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None):
        """
        WARNING: input has to be z_ij table! each gene needs to be normalized
        (mean=0, stdev=1) for all samples.
        """
        data, oknames, gsetsnum = selectGenesetsData(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart, classValues=classValues)

        tscorecache = {}

        def tscorec(data, at, cache=None):
            """ Cached attribute t-score calculation. """
            if cache is not None and at in cache:
                return cache[at]
            ma = obiExpression.MA_t_test()(at, data)
            if cache is not None:
                cache[at] = ma
            return ma

        def compute_corg(data, inds):
            """
            Compute the CORG for the gene set given by gene indices `inds`
            in the example table.  Output is the list of gene indices in
            the CORG.
            """
            # order member genes by their t-scores: decreasing, if
            # av(t-score) >= 0, else increasing
            tscores = [tscorec(data, at, tscorecache) for at in inds]
            sortedinds = nth(sorted(zip(inds, tscores), key=lambda x: x[1], \
                reverse=statc.mean(tscores) >= 0), 0)

            def S(corg):
                """ Activity score separation - S(G) in the article. """
                asv = orange.FloatVariable(name='AS')
                asv.getValueFrom = lambda ex, rw: orange.Value(asv, corgs_activity_score(ex, corg))
                data2 = orange.ExampleTable(orange.Domain([asv], data.domain.classVar), data)
                return abs(tscorec(data2, 0))  # FIXME absolute - nothing in the article about it

            # greedily grow the CORG while separation keeps improving
            g = S(sortedinds[:1])
            bg = 1
            for a in range(2, len(sortedinds) + 1):
                tg = S(sortedinds[:a])
                if tg > g:
                    g = tg
                    bg = a
                else:
                    break

            # BUG FIX: return the best prefix found (length bg); the old
            # `sortedinds[:a]` included the first non-improving gene and
            # raised NameError for single-gene sets (loop body never ran).
            return sortedinds[:bg]

        # on the learning set, produce the CORG for each gene set and
        # save it for use in further prediction
        corgs = {}

        for name, inds in gsetsnum.items():
            inds = sorted(set(inds))  # take each gene only once!
            corgs[name] = compute_corg(data, inds)

        return CORGs(corgs=corgs)

284 | ||

class GSA(object):
    """
    Maps an example to {geneset_name: mean value of the subset genes},
    skipping unknown values.  Expects a `subsets` keyword attribute
    ({name: list of gene indices}), as built by GSALearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        scores = {}
        for name, inds in self.subsets.items():
            known = [example[i].value for i in inds if example[i].value != "?"]
            scores[name] = statc.mean(known)
        return scores

294 | ||

class GSALearner(object):
    """
    Converts per-gene t-scores to z-scores and keeps, for each gene set,
    only the genes of its dominant direction; returns a GSA scorer.
    """

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None):
        """
        WARNING: input has to be z_ij table! each gene needs to be normalized
        (mean=0, stdev=1) for all samples.
        """
        import scipy.stats

        data, oknames, gsetsnum = selectGenesetsData(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart, classValues=classValues)

        def tscorec(data, at, cache=None):
            return obiExpression.MA_t_test()(at, data)

        def to_z_score(t):
            # t-statistic -> standard-normal quantile of its CDF
            return float(scipy.stats.norm.ppf(scipy.stats.t.cdf(t, len(data) - 2)))

        zscores = [to_z_score(tscorec(data, at))
                   for at in data.domain.attributes]

        subsets = {}

        for name, inds in gsetsnum.items():
            inds = sorted(set(inds))  # take each gene only once!

            # mean positive part + mean negative part decides the
            # dominant direction of the gene set
            D = statc.mean([max(zscores[i], 0) for i in inds]) \
                + statc.mean([min(zscores[i], 0) for i in inds])

            if D >= 0:
                subsets[name] = [i for i in inds if zscores[i] > 0.0]
            else:
                subsets[name] = [i for i in inds if zscores[i] < 0.0]

        return GSA(subsets=subsets)

[485] | 332 | |

def pls_transform(example, constt):
    """
    Uses calculated PLS weights to transform the given example.

    `constt` is the (inds, xmean, W, P) tuple stored by PLSLearner:
    attribute indices of the gene set, the training-set column means,
    and the PLS weight and loading matrices.  Returns a 1 x nc array of
    component scores for the example.
    """

    inds, xmean, W, P = constt
    # restrict the example to the gene set's attributes
    dom = orange.Domain([example.domain.attributes[i1] for i1 in inds ], False)
    newex = orange.ExampleTable(dom, [example])
    ex = newex.toNumpy()[0]
    ex = ex - xmean # same input transformation

    nc = W.shape[1]

    TR = numpy.empty((1, nc))
    XR = ex

    dot = numpy.dot

    # sequential deflation: extract each component's score, then remove
    # its contribution before computing the next (order matters here)
    for i in range(nc):
        t = dot(XR, W[:,i].T)
        XR = XR - t*numpy.array([P[:,i]])
        TR[:,i] = t

    return TR

357 | ||

def PLSCall(data, y=None, x=None, nc=None, weight=None, save_partial=False):
    """
    Fit PLS1 (Gutkin, Shamir, Dror: SlimPLS) on `data`.

    y, x: lists of response/predictor variables; default to the class
    variable and all remaining variables.
    nc: number of components (default: one per predictor).
    weight, save_partial: unused; kept for interface compatibility.

    Returns (XMean, W, P, T): column means of X and the weight, loading
    and score matrices.
    """
    if y is None:
        y = [ data.domain.classVar ]
    if x is None:
        x = [v for v in data.domain.variables if v not in y]

    Ncomp = nc if nc is not None else len(x)

    dataX = orange.ExampleTable(orange.Domain(x, False), data)
    dataY = orange.ExampleTable(orange.Domain(y, False), data)

    # transformation to numpy arrays
    X = dataX.toNumpy()[0]
    Y = dataY.toNumpy()[0]

    # data dimensions
    n, mx = numpy.shape(X)

    # center both matrices (note: only centering, no scaling by stdev)
    YMean = numpy.mean(Y, axis=0)
    XMean = numpy.mean(X, axis=0)

    X = (X - XMean)
    Y = (Y - YMean)

    P = numpy.empty((mx, Ncomp))
    T = numpy.empty((n, Ncomp))
    W = numpy.empty((mx, Ncomp))
    E, F = X, Y

    dot = numpy.dot
    norm = numpy.linalg.norm

    # PLS1 - from Gutkin, Shamir, Dror: SlimPLS
    for i in range(Ncomp):
        w = dot(E.T, F)
        w = w / norm(w)  # normalize w; Gutkin et al. do w*c with c = 1/norm(w)
        t = dot(E, w)  # t_i -> a row vector
        p = dot(E.T, t) / dot(t.T, t)  # p_i; t.T is a row vector - this is inner(t.T, t.T)
        q = dot(F.T, t) / dot(t.T, t)  # q_i

        # deflate before extracting the next component
        E = E - dot(t, p.T)
        F = F - dot(t, q.T)

        T[:, i] = t.T
        W[:, i] = w.T
        P[:, i] = p.T

    return XMean, W, P, T

[977] | 413 | |

class PLS(object):
    """
    Maps an example to {geneset_name: PLS component score(s)}.  With more
    than one component per set, names get a "_LC_<i>" suffix.  Expects a
    `constructt` keyword attribute, as built by PLSLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        od = {}

        for name, constt in self.constructt.items():
            ts = pls_transform(example, constt)[0]
            if len(ts) == 1:
                od[name] = ts[0]
            else:
                # one feature per latent component
                for comp_index, score in enumerate(ts):
                    od[name + "_LC_" + str(comp_index + 1)] = score

        return od

[977] | 433 | |

class PLSLearner(object):
    """ Transforms gene sets using Partial Least Squares. """

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None, components=1):
        """
        If more than 1 component is used, _LC_<componentNumber> is appended
        to the name of the gene set.
        """

        data, oknames, gsetsnum = selectGenesetsData(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart, classValues=classValues)

        constructt = {}

        #build weight matrices for every gene set
        for name, inds in gsetsnum.items():
            dom2 = orange.Domain([ data.domain.attributes[i1] for i1 in inds ], data.domain.classVar)
            data_gs = orange.ExampleTable(dom2, data)
            # fit PLS against the class variable on the subset of genes
            xmean, W, P, T = PLSCall(data_gs, nc=components, y=[data_gs.domain.classVar], save_partial=True)
            constructt[name] = inds, xmean, W, P

        return PLS(constructt=constructt)

[977] | 456 | |

class PCA(object):
    """
    Maps an example to {geneset_name: projection onto the gene set's first
    principal component}.  Expects a `constructt` keyword attribute, as
    built by PCALearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        return dict((name, pca_transform(example, constt)[0])
                    for name, constt in self.constructt.items())

471 | ||

def pca_transform(example, constt):
    """
    Project `example` onto the first principal component stored in
    `constt` = (inds, evals, evect, xmean).
    """
    inds, evals, evect, xmean = constt
    subdomain = orange.Domain(
        [example.domain.attributes[i1] for i1 in inds], False)
    row = orange.ExampleTable(subdomain, [example]).toNumpy()[0]
    row = row - xmean  # same centering as in training - a row in a matrix

    # the first eigenvector is a row in the matrix - do a dot product
    return numpy.dot(row, evect[0])

482 | ||

def pca(data, snapshot=0):
    """
    Perform PCA on the (centered) data matrix.

    Returns (evals, evecs, XMean): eigenvalues and eigenvectors sorted by
    decreasing absolute eigenvalue (eigenvectors as rows), plus the column
    means used for centering.
    """
    M = data.toNumpy("a")[0]
    XMean = numpy.mean(M, axis = 0)
    M = M - XMean

    T, N = numpy.shape(M)
    # if there are less rows T than columns N, use snapshot method
    if (T < N) or snapshot:
        # eigendecompose the smaller T x T matrix, then map back
        C = numpy.dot(M, numpy.transpose(M))
        evals, evecsC = numpy.linalg.eigh(C) #columns of evecsC are eigenvectors
        evecs = numpy.dot(M.T, evecsC)/numpy.sqrt(numpy.abs(evals))
    else:
        K = numpy.dot(numpy.transpose(M), M)
        evals, evecs = numpy.linalg.eigh(K)

    # eigenvectors as rows from here on
    evecs = numpy.transpose(evecs)

    # sort the eigenvalues and eigenvectors, descending order
    order = (numpy.argsort(numpy.abs(evals))[::-1])
    evecs = numpy.take(evecs, order, 0)
    evals = numpy.take(evals, order)
    return evals, evecs, XMean

506 | ||

507 | class PCALearner(object): | |

508 | """ Transforms gene sets using Principal Leasts Squares. """ | |

509 | ||

[1075] | 510 | def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None): |

[998] | 511 | |

[1075] | 512 | data, oknames, gsetsnum = selectGenesetsData(data, matcher, geneSets, \ |

[998] | 513 | minSize=minSize, maxSize=maxSize, minPart=minPart, classValues=classValues) |

514 | ||

515 | constructt = {} | |

516 | ||

517 | #build weight matrices for every gene set | |

518 | for name, inds in gsetsnum.items(): | |

519 | dom2 = orange.Domain([ data.domain.attributes[i1] for i1 in inds ], data.domain.classVar) | |

520 | ||

521 | data_gs = orange.ExampleTable(dom2, data) | |

522 | evals, evect, xmean = pca(data_gs) | |

523 | constructt[name] = inds, evals, evect, xmean | |

524 | ||

525 | return PCA(constructt=constructt) | |

526 | ||

[848] | 527 | |

class SimpleFun(object):
    """
    Maps an example to {geneset_name: fn(values of the set's attributes)}.
    Expects keyword attributes `gsets` ({name: index list}) and `fn`.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        scores = {}
        for name, ids in self.gsets.items():
            scores[name] = self.fn([example[i].value for i in ids])
        return scores

[848] | 539 | |

class SimpleFunLearner(object):
    """
    Just applies a function taking attribute values of an example and
    produces to all gene sets.
    """

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None, fn=None):
        data, oknames, gsetsnum = selectGenesetsData(
            data, matcher, geneSets, minSize=minSize, maxSize=maxSize,
            minPart=minPart, classValues=classValues)
        return SimpleFun(gsets=gsetsnum, fn=fn)

549 | ||

class MedianLearner(object):
    """Scores each gene set with the median of its genes' values."""

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None, fn=None):
        # `fn` used to be accepted but silently ignored; honour it when
        # given, defaulting to the median.
        sfl = SimpleFunLearner()
        return sfl(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart,
            classValues=classValues,
            fn=fn if fn is not None else statc.median)

556 | ||

class MeanLearner(object):
    """Scores each gene set with the mean of its genes' values."""

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None, fn=None):
        # `fn` used to be accepted but silently ignored; honour it when
        # given, defaulting to the mean.
        sfl = SimpleFunLearner()
        return sfl(data, matcher, geneSets, \
            minSize=minSize, maxSize=maxSize, minPart=minPart,
            classValues=classValues,
            fn=fn if fn is not None else statc.mean)

563 | ||

def impute_missing(data):
    """
    Drop attributes whose values are all unknown, then impute the
    remaining unknowns with a majority-value model.
    """
    # keep only attributes with at least one known value
    newatts = [at for at in data.domain.attributes
               if any(not ex[at].isSpecial() for ex in data)]

    dom2 = orange.Domain(newatts, data.domain.classVar)
    data = orange.ExampleTable(dom2, data)

    # impute with the per-attribute majority value
    imputer = orange.ImputerConstructor_model()
    imputer.learnerContinuous = imputer.learnerDiscrete = orange.MajorityLearner()
    imputer = imputer(data)

    data = imputer(data)
    return data

584 | ||

def setSig_example(ldata, ex, genesets):
    """
    Create a dictionary with (geneset_name, geneset_expression) for every
    geneset on input.

    ldata is class-annotated data
    """
    # gene set scores for a single example against the learning set
    pathways = {}

    def setSig_example_geneset(ex, data):
        """ ex contains only selected genes """

        # correlations of ex with learning examples, split by class
        distances = [ [], [] ]

        def pearsonr(v1, v2):
            # statc.pearsonr can fail; fall back to the numpy estimate
            try:
                return statc.pearsonr(v1, v2)[0]
            except:
                return numpy.corrcoef([v1, v2])[0,1]

        def pearson(ex1, ex2):
            # correlation between two examples over all attributes
            attrs = range(len(ex1.domain.attributes))
            vals1 = [ ex1[i].value for i in attrs ]
            vals2 = [ ex2[i].value for i in attrs ]
            return pearsonr(vals1, vals2)

        def ttest(ex1, ex2):
            # t-statistic between the two correlation samples; 0.0 on failure
            try:
                return stats.lttest_ind(ex1, ex2)[0]
            except:
                return 0.0

        #maps class value to its index
        classValueMap = dict( [ (val,i) for i,val in enumerate(data.domain.classVar.values) ])

        #create distances to all learning data - same or other class
        for c in data:
            distances[classValueMap[c[-1].value]].append(pearson(c, ex))

        return ttest(distances[0], distances[1])

    for name, gs in genesets.items(): #for each geneset
        #for each gene set: take the attribute subset and work on the attribute subset only
        #only select the subset of genes from the learning data
        domain = orange.Domain([ldata.domain.attributes[ai] for ai in gs], ldata.domain.classVar)
        datao = orange.ExampleTable(domain, ldata)
        example = orange.Example(domain, ex) #domains need to be the same

        ocena = setSig_example_geneset(example, datao)
        pathways[name] = ocena

    return pathways

638 | ||

class SetSig(object):
    """
    Maps an example to {geneset_name: SetSig score} computed against the
    stored learning data.  Expects keyword attributes `learndata` and
    `genesets`, as built by SetSigLearner.
    """

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __call__(self, example):
        return setSig_example(self.learndata, example, self.genesets)

647 | ||

class SetSigLearner(object):
    """Builds a SetSig scorer over the selected gene sets and data."""

    def __call__(self, data, matcher, geneSets, minSize=3, maxSize=1000, minPart=0.1, classValues=None):
        data, oknames, gsetsnum = selectGenesetsData(
            data, matcher, geneSets, minSize=minSize, maxSize=maxSize,
            minPart=minPart, classValues=classValues)
        return SetSig(learndata=data, genesets=gsetsnum)

[848] | 654 | |

if __name__ == "__main__":

    # Smoke test on the iris data with two overlapping "gene sets" made
    # of its attributes.
    data = Orange.data.Table("iris")
    gsets = obiGeneSets.collections({
        #"ALL": ['sepal length', 'sepal width', 'petal length', 'petal width'],
        "f3": ['sepal length', 'sepal width', 'petal length'],
        "l3": ['sepal width', 'petal length', 'petal width'],
        })

    # split into a learning and a testing part
    fp = 120
    ldata = orange.ExampleTable(data.domain, data[:fp])
    tdata = orange.ExampleTable(data.domain, data[fp:])

    # trivial matcher: attribute names are used as-is
    matcher = obiGene.matcher([])

    choosen_cv = ["Iris-setosa", "Iris-versicolor"]
    # Alternative learners, kept for manual experimentation:
    #ass = AssessLearner()(data, matcher, gsets, rankingf=AT_loessLearner())
    ass = AssessLearner()(data, matcher, gsets, minPart=0.0)
    #ass = MeanLearner()(data, matcher, gsets, default=False)
    #ass = CORGsLearner()(data, matcher, gsets)
    #ass = MedianLearner()(data, matcher, gsets)
    #ass = PLSLearner()(data, matcher, gsets, classValues=choosen_cv, minPart=0.0)
    #ass = SetSigLearner()(ldata, matcher, gsets, classValues=choosen_cv, minPart=0.0)
    #ass = PCALearner()(ldata, matcher, gsets, classValues=choosen_cv, minPart=0.0)
    #ass = GSALearner()(ldata, matcher, gsets, classValues=choosen_cv, minPart=0.0)

    # collect per-gene-set scores for the first few examples
    ar = defaultdict(list)
    for d in (list(ldata) + list(tdata))[:5]:
        for a,b in ass(d).items():
            ar[a].append(b)

    def pp1(ar):
        # print gene sets (by id) with their collected scores
        ol = sorted(ar.items())
        print '\n'.join([ a.id + ": " +str(b) for a,b in ol])

    pp1(ar)

[1571] | 691 |

**Note:**See TracBrowser for help on using the repository browser.