Actual source code: mkl_pardiso.c

  1: #include <../src/mat/impls/aij/seq/aij.h>
  2: #include <../src/mat/impls/sbaij/seq/sbaij.h>
  3: #include <../src/mat/impls/dense/seq/dense.h>

  5: #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
  6: #define MKL_ILP64
  7: #endif
  8: #include <mkl_pardiso.h>

 10: PETSC_EXTERN void PetscSetMKL_PARDISOThreads(int);

 12: /*
 13:  *  Possible mkl_pardiso phases that control the execution of the solver.
 14:  *  For more information, check the mkl_pardiso manual; an illustrative phase sequence is sketched below.
 15:  */
 16: #define JOB_ANALYSIS 11
 17: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION 12
 18: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 13
 19: #define JOB_NUMERICAL_FACTORIZATION 22
 20: #define JOB_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 23
 21: #define JOB_SOLVE_ITERATIVE_REFINEMENT 33
 22: #define JOB_SOLVE_FORWARD_SUBSTITUTION 331
 23: #define JOB_SOLVE_DIAGONAL_SUBSTITUTION 332
 24: #define JOB_SOLVE_BACKWARD_SUBSTITUTION 333
 25: #define JOB_RELEASE_OF_LU_MEMORY 0
 26: #define JOB_RELEASE_OF_ALL_MEMORY -1

 28: #define IPARM_SIZE 64

 30: #if defined(PETSC_USE_64BIT_INDICES)
 31:  #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
 32:   #define INT_TYPE long long int
 33:   #define MKL_PARDISO pardiso
 34:   #define MKL_PARDISO_INIT pardisoinit
 35:  #else
 36:   /* this is the case where the MKL BLAS/LAPACK use 32 bit integers but the 64 bit integer
 37:      version of the Pardiso code is used; hence the need for the _64 variants below */
 38:   #define INT_TYPE long long int
 39:   #define MKL_PARDISO pardiso_64
 40:   #define MKL_PARDISO_INIT pardiso_64init
 41: void pardiso_64init(void *pt, INT_TYPE *mtype, INT_TYPE iparm [])
 42: {
 43:   int iparm_copy[IPARM_SIZE], mtype_copy, i;

 45:   mtype_copy = *mtype;
 46:   pardisoinit(pt, &mtype_copy, iparm_copy);
 47:   for (i=0; i<IPARM_SIZE; i++) iparm[i] = iparm_copy[i];
 48: }
 49:  #endif
 50: #else
 51:  #define INT_TYPE int
 52:  #define MKL_PARDISO pardiso
 53:  #define MKL_PARDISO_INIT pardisoinit
 54: #endif
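
The INT_TYPE/MKL_PARDISO selection above, together with the phase codes defined earlier, is all that the calls in this file need. As an editorial illustration (not part of mkl_pardiso.c), a minimal sketch of one factor/solve cycle follows; it mirrors the MKL_PARDISO() calls made later in this file and assumes that pt, iparm, mtype, maxfct, mnum, msglvl, n, nrhs, err, the CSR arrays ia/ja/a, the permutation array perm, and the right-hand side/solution arrays b and x are declared and initialized as in the Mat_MKL_PARDISO structure below:

  INT_TYPE phase;

  phase = JOB_ANALYSIS;                        /* symbolic factorization */
  MKL_PARDISO(pt, &maxfct, &mnum, &mtype, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, NULL, NULL, &err);
  phase = JOB_NUMERICAL_FACTORIZATION;         /* numeric factorization */
  MKL_PARDISO(pt, &maxfct, &mnum, &mtype, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, NULL, NULL, &err);
  phase = JOB_SOLVE_ITERATIVE_REFINEMENT;      /* solve A x = b, with iterative refinement */
  MKL_PARDISO(pt, &maxfct, &mnum, &mtype, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, (void*)b, (void*)x, &err);
  phase = JOB_RELEASE_OF_ALL_MEMORY;           /* free all internal PARDISO storage */
  MKL_PARDISO(pt, &maxfct, &mnum, &mtype, &phase, &n, NULL, NULL, NULL, NULL, &nrhs, iparm, &msglvl, NULL, NULL, &err);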

 56: /*
 57:  *  Internal data structure.
 58:  *  For more information, check the mkl_pardiso manual.
 59:  */
 60: typedef struct {

 62:   /* Configuration vector*/
 63:   INT_TYPE     iparm[IPARM_SIZE];

 65:   /*
 66:    * Internal mkl_pardiso memory location.
 67:    * After the first call to mkl_pardiso do not modify pt, as that could cause a serious memory leak.
 68:    */
 69:   void         *pt[IPARM_SIZE];

 71:   /* Basic mkl_pardiso info*/
 72:   INT_TYPE     phase, maxfct, mnum, mtype, n, nrhs, msglvl, err;

 74:   /* Matrix structure*/
 75:   void         *a;
 76:   INT_TYPE     *ia, *ja;

 78:   /* Number of non-zero elements*/
 79:   INT_TYPE     nz;

 82:   /* Row permutation vector */
 82:   INT_TYPE     *perm;

 85:   /* Indicates whether the matrix preserves its sparse (nonzero) structure between factorizations */
 85:   MatStructure matstruc;

 87:   PetscBool    needsym;
 88:   PetscBool    freeaij;

 90:   /* Schur complement */
 91:   PetscScalar  *schur;
 92:   PetscInt     schur_size;
 93:   PetscInt     *schur_idxs;
 94:   PetscScalar  *schur_work;
 95:   PetscBLASInt schur_work_size;
 96:   PetscBool    solve_interior;

 98:   /* True if the mkl_pardiso functions have been used. */
 99:   PetscBool CleanUp;

101:   /* Conversion to a format suitable for MKL */
102:   PetscErrorCode (*Convert)(Mat, PetscBool, MatReuse, PetscBool*, INT_TYPE*, INT_TYPE**, INT_TYPE**, PetscScalar**);
103: } Mat_MKL_PARDISO;

105: PetscErrorCode MatMKLPardiso_Convert_seqsbaij(Mat A,PetscBool sym,MatReuse reuse,PetscBool *free,INT_TYPE *nnz,INT_TYPE **r,INT_TYPE **c,PetscScalar **v)
106: {
107:   Mat_SeqSBAIJ   *aa = (Mat_SeqSBAIJ*)A->data;
108:   PetscInt       bs  = A->rmap->bs,i;

111:   *v      = aa->a;
112:   if (bs == 1) { /* already in the correct format */
113:     /* though PetscInt and INT_TYPE have the same size, they are defined as different types, so the Intel compiler requires a cast */
114:     *r    = (INT_TYPE*)aa->i;
115:     *c    = (INT_TYPE*)aa->j;
116:     *nnz  = (INT_TYPE)aa->nz;
117:     *free = PETSC_FALSE;
118:   } else if (reuse == MAT_INITIAL_MATRIX) {
119:     PetscInt m = A->rmap->n,nz = aa->nz;
120:     PetscInt *row,*col;
121:     PetscMalloc2(m+1,&row,nz,&col);
122:     for (i=0; i<m+1; i++) {
123:       row[i] = aa->i[i]+1;
124:     }
125:     for (i=0; i<nz; i++) {
126:       col[i] = aa->j[i]+1;
127:     }
128:     *r    = (INT_TYPE*)row;
129:     *c    = (INT_TYPE*)col;
130:     *nnz  = (INT_TYPE)nz;
131:     *free = PETSC_TRUE;
132:   }
133:   return 0;
134: }

136: PetscErrorCode MatMKLPardiso_Convert_seqbaij(Mat A,PetscBool sym,MatReuse reuse,PetscBool *free,INT_TYPE *nnz,INT_TYPE **r,INT_TYPE **c,PetscScalar **v)
137: {
138:   Mat_SeqBAIJ    *aa = (Mat_SeqBAIJ*)A->data;
139:   PetscInt       bs  = A->rmap->bs,i;

141:   if (!sym) {
142:     *v      = aa->a;
143:     if (bs == 1) { /* already in the correct format */
144:       /* though PetscInt and INT_TYPE have the same size, they are defined as different types, so the Intel compiler requires a cast */
145:       *r    = (INT_TYPE*)aa->i;
146:       *c    = (INT_TYPE*)aa->j;
147:       *nnz  = (INT_TYPE)aa->nz;
148:       *free = PETSC_FALSE;
149:       return 0;
150:     } else if (reuse == MAT_INITIAL_MATRIX) {
151:       PetscInt m = A->rmap->n,nz = aa->nz;
152:       PetscInt *row,*col;
153:       PetscMalloc2(m+1,&row,nz,&col);
154:       for (i=0; i<m+1; i++) {
155:         row[i] = aa->i[i]+1;
156:       }
157:       for (i=0; i<nz; i++) {
158:         col[i] = aa->j[i]+1;
159:       }
160:       *r    = (INT_TYPE*)row;
161:       *c    = (INT_TYPE*)col;
162:       *nnz  = (INT_TYPE)nz;
163:     }
164:     *free = PETSC_TRUE;
165:   } else {
166:     SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"This should not happen");
167:   }
168:   return 0;
169: }

171: PetscErrorCode MatMKLPardiso_Convert_seqaij(Mat A,PetscBool sym,MatReuse reuse,PetscBool *free,INT_TYPE *nnz,INT_TYPE **r,INT_TYPE **c,PetscScalar **v)
172: {
173:   Mat_SeqAIJ     *aa = (Mat_SeqAIJ*)A->data;
174:   PetscScalar    *aav;

176:   MatSeqAIJGetArrayRead(A,(const PetscScalar**)&aav);
177:   if (!sym) { /* already in the correct format */
178:     *v    = aav;
179:     *r    = (INT_TYPE*)aa->i;
180:     *c    = (INT_TYPE*)aa->j;
181:     *nnz  = (INT_TYPE)aa->nz;
182:     *free = PETSC_FALSE;
183:   } else if (reuse == MAT_INITIAL_MATRIX) { /* need to get the triangular part */
184:     PetscScalar *vals,*vv;
185:     PetscInt    *row,*col,*jj;
186:     PetscInt    m = A->rmap->n,nz,i;

188:     nz = 0;
189:     for (i=0; i<m; i++) nz += aa->i[i+1] - aa->diag[i];
190:     PetscMalloc2(m+1,&row,nz,&col);
191:     PetscMalloc1(nz,&vals);
192:     jj = col;
193:     vv = vals;

195:     row[0] = 0;
196:     for (i=0; i<m; i++) {
197:       PetscInt    *aj = aa->j + aa->diag[i];
198:       PetscScalar *av = aav + aa->diag[i];
199:       PetscInt    rl  = aa->i[i+1] - aa->diag[i],j;

201:       for (j=0; j<rl; j++) {
202:         *jj = *aj; jj++; aj++;
203:         *vv = *av; vv++; av++;
204:       }
205:       row[i+1] = row[i] + rl;
206:     }
207:     *v    = vals;
208:     *r    = (INT_TYPE*)row;
209:     *c    = (INT_TYPE*)col;
210:     *nnz  = (INT_TYPE)nz;
211:     *free = PETSC_TRUE;
212:   } else {
213:     PetscScalar *vv;
214:     PetscInt    m = A->rmap->n,i;

216:     vv = *v;
217:     for (i=0; i<m; i++) {
218:       PetscScalar *av = aav + aa->diag[i];
219:       PetscInt    rl  = aa->i[i+1] - aa->diag[i],j;
220:       for (j=0; j<rl; j++) {
221:         *vv = *av; vv++; av++;
222:       }
223:     }
224:     *free = PETSC_TRUE;
225:   }
226:   MatSeqAIJRestoreArrayRead(A,(const PetscScalar**)&aav);
227:   return 0;
228: }

230: static PetscErrorCode MatMKLPardisoSolveSchur_Private(Mat F, PetscScalar *B, PetscScalar *X)
231: {
232:   Mat_MKL_PARDISO      *mpardiso = (Mat_MKL_PARDISO*)F->data;
233:   Mat                  S,Xmat,Bmat;
234:   MatFactorSchurStatus schurstatus;

236:   MatFactorGetSchurComplement(F,&S,&schurstatus);
238:   MatCreateSeqDense(PETSC_COMM_SELF,mpardiso->schur_size,mpardiso->nrhs,B,&Bmat);
239:   MatCreateSeqDense(PETSC_COMM_SELF,mpardiso->schur_size,mpardiso->nrhs,X,&Xmat);
240:   MatSetType(Bmat,((PetscObject)S)->type_name);
241:   MatSetType(Xmat,((PetscObject)S)->type_name);
242: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
243:   MatBindToCPU(Xmat,S->boundtocpu);
244:   MatBindToCPU(Bmat,S->boundtocpu);
245: #endif

247: #if defined(PETSC_USE_COMPLEX)
249: #endif

251:   switch (schurstatus) {
252:   case MAT_FACTOR_SCHUR_FACTORED:
253:     if (!mpardiso->iparm[12-1]) {
254:       MatMatSolve(S,Bmat,Xmat);
255:     } else { /* transpose solve */
256:       MatMatSolveTranspose(S,Bmat,Xmat);
257:     }
258:     break;
259:   case MAT_FACTOR_SCHUR_INVERTED:
260:     MatProductCreateWithMat(S,Bmat,NULL,Xmat);
261:     if (!mpardiso->iparm[12-1]) {
262:       MatProductSetType(Xmat,MATPRODUCT_AB);
263:     } else { /* transpose solve */
264:       MatProductSetType(Xmat,MATPRODUCT_AtB);
265:     }
266:     MatProductSetFromOptions(Xmat);
267:     MatProductSymbolic(Xmat);
268:     MatProductNumeric(Xmat);
269:     MatProductClear(Xmat);
270:     break;
271:   default:
272:     SETERRQ(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %" PetscInt_FMT,F->schur_status);
273:     break;
274:   }
275:   MatFactorRestoreSchurComplement(F,&S,schurstatus);
276:   MatDestroy(&Bmat);
277:   MatDestroy(&Xmat);
278:   return 0;
279: }

281: PetscErrorCode MatFactorSetSchurIS_MKL_PARDISO(Mat F, IS is)
282: {
283:   Mat_MKL_PARDISO   *mpardiso = (Mat_MKL_PARDISO*)F->data;
284:   const PetscScalar *arr;
285:   const PetscInt    *idxs;
286:   PetscInt          size,i;
287:   PetscMPIInt       csize;
288:   PetscBool         sorted;

290:   MPI_Comm_size(PetscObjectComm((PetscObject)F),&csize);
292:   ISSorted(is,&sorted);
293:   if (!sorted) {
294:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS for MKL_PARDISO Schur complements needs to be sorted");
295:   }
296:   ISGetLocalSize(is,&size);
297:   PetscFree(mpardiso->schur_work);
298:   PetscBLASIntCast(PetscMax(mpardiso->n,2*size),&mpardiso->schur_work_size);
299:   PetscMalloc1(mpardiso->schur_work_size,&mpardiso->schur_work);
300:   MatDestroy(&F->schur);
301:   MatCreateSeqDense(PETSC_COMM_SELF,size,size,NULL,&F->schur);
302:   MatDenseGetArrayRead(F->schur,&arr);
303:   mpardiso->schur      = (PetscScalar*)arr;
304:   mpardiso->schur_size = size;
305:   MatDenseRestoreArrayRead(F->schur,&arr);
306:   if (mpardiso->mtype == 2) {
307:     MatSetOption(F->schur,MAT_SPD,PETSC_TRUE);
308:   }

310:   PetscFree(mpardiso->schur_idxs);
311:   PetscMalloc1(size,&mpardiso->schur_idxs);
312:   PetscArrayzero(mpardiso->perm,mpardiso->n);
313:   ISGetIndices(is,&idxs);
314:   PetscArraycpy(mpardiso->schur_idxs,idxs,size);
315:   for (i=0;i<size;i++) mpardiso->perm[idxs[i]] = 1;
316:   ISRestoreIndices(is,&idxs);
317:   if (size) { /* turn on Schur switch if the set of indices is not empty */
318:     mpardiso->iparm[36-1] = 2;
319:   }
320:   return 0;
321: }
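
For orientation, a minimal caller-side sketch of how MatFactorSetSchurIS_MKL_PARDISO() is reached follows (an editorial illustration, not taken from the PETSc sources; error checking omitted). It assumes a sequential n-by-n matrix A and requests the Schur complement on the last nschur unknowns:

  Mat                  F, S;
  IS                   is, rperm, cperm;
  MatFactorInfo        info;
  MatFactorSchurStatus status;

  MatGetFactor(A, MATSOLVERMKL_PARDISO, MAT_FACTOR_LU, &F);
  ISCreateStride(PETSC_COMM_SELF, nschur, n - nschur, 1, &is);  /* sorted indices of the Schur block */
  MatFactorSetSchurIS(F, is);                                   /* must be called before the symbolic phase */
  MatGetOrdering(A, MATORDERINGNATURAL, &rperm, &cperm);
  MatFactorInfoInitialize(&info);
  MatLUFactorSymbolic(F, A, rperm, cperm, &info);
  MatLUFactorNumeric(F, A, &info);                              /* PARDISO also assembles the dense Schur complement */
  MatFactorGetSchurComplement(F, &S, &status);
  /* ... use S ... */
  MatFactorRestoreSchurComplement(F, &S, status);
  ISDestroy(&is); ISDestroy(&rperm); ISDestroy(&cperm); MatDestroy(&F);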

323: PetscErrorCode MatDestroy_MKL_PARDISO(Mat A)
324: {
325:   Mat_MKL_PARDISO *mat_mkl_pardiso=(Mat_MKL_PARDISO*)A->data;

327:   if (mat_mkl_pardiso->CleanUp) {
328:     mat_mkl_pardiso->phase = JOB_RELEASE_OF_ALL_MEMORY;

330:     MKL_PARDISO (mat_mkl_pardiso->pt,
331:       &mat_mkl_pardiso->maxfct,
332:       &mat_mkl_pardiso->mnum,
333:       &mat_mkl_pardiso->mtype,
334:       &mat_mkl_pardiso->phase,
335:       &mat_mkl_pardiso->n,
336:       NULL,
337:       NULL,
338:       NULL,
339:       NULL,
340:       &mat_mkl_pardiso->nrhs,
341:       mat_mkl_pardiso->iparm,
342:       &mat_mkl_pardiso->msglvl,
343:       NULL,
344:       NULL,
345:       &mat_mkl_pardiso->err);
346:   }
347:   PetscFree(mat_mkl_pardiso->perm);
348:   PetscFree(mat_mkl_pardiso->schur_work);
349:   PetscFree(mat_mkl_pardiso->schur_idxs);
350:   if (mat_mkl_pardiso->freeaij) {
351:     PetscFree2(mat_mkl_pardiso->ia,mat_mkl_pardiso->ja);
352:     if (mat_mkl_pardiso->iparm[34] == 1) {
353:       PetscFree(mat_mkl_pardiso->a);
354:     }
355:   }
356:   PetscFree(A->data);

358:   /* clear composed functions */
359:   PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);
360:   PetscObjectComposeFunction((PetscObject)A,"MatFactorSetSchurIS_C",NULL);
361:   PetscObjectComposeFunction((PetscObject)A,"MatMkl_PardisoSetCntl_C",NULL);
362:   return 0;
363: }

365: static PetscErrorCode MatMKLPardisoScatterSchur_Private(Mat_MKL_PARDISO *mpardiso, PetscScalar *whole, PetscScalar *schur, PetscBool reduce)
366: {
367:   if (reduce) { /* data given for the whole matrix */
368:     PetscInt i,m=0,p=0;
369:     for (i=0;i<mpardiso->nrhs;i++) {
370:       PetscInt j;
371:       for (j=0;j<mpardiso->schur_size;j++) {
372:         schur[p+j] = whole[m+mpardiso->schur_idxs[j]];
373:       }
374:       m += mpardiso->n;
375:       p += mpardiso->schur_size;
376:     }
377:   } else { /* from Schur to whole */
378:     PetscInt i,m=0,p=0;
379:     for (i=0;i<mpardiso->nrhs;i++) {
380:       PetscInt j;
381:       for (j=0;j<mpardiso->schur_size;j++) {
382:         whole[m+mpardiso->schur_idxs[j]] = schur[p+j];
383:       }
384:       m += mpardiso->n;
385:       p += mpardiso->schur_size;
386:     }
387:   }
388:   return 0;
389: }

391: PetscErrorCode MatSolve_MKL_PARDISO(Mat A,Vec b,Vec x)
392: {
393:   Mat_MKL_PARDISO   *mat_mkl_pardiso=(Mat_MKL_PARDISO*)A->data;
394:   PetscScalar       *xarray;
395:   const PetscScalar *barray;

397:   mat_mkl_pardiso->nrhs = 1;
398:   VecGetArrayWrite(x,&xarray);
399:   VecGetArrayRead(b,&barray);

401:   if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
402:   else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;

404:   if (barray == xarray) { /* if the two vectors share the same memory */
405:     PetscScalar *work;
406:     if (!mat_mkl_pardiso->schur_work) {
407:       PetscMalloc1(mat_mkl_pardiso->n,&work);
408:     } else {
409:       work = mat_mkl_pardiso->schur_work;
410:     }
411:     mat_mkl_pardiso->iparm[6-1] = 1;
412:     MKL_PARDISO (mat_mkl_pardiso->pt,
413:       &mat_mkl_pardiso->maxfct,
414:       &mat_mkl_pardiso->mnum,
415:       &mat_mkl_pardiso->mtype,
416:       &mat_mkl_pardiso->phase,
417:       &mat_mkl_pardiso->n,
418:       mat_mkl_pardiso->a,
419:       mat_mkl_pardiso->ia,
420:       mat_mkl_pardiso->ja,
421:       NULL,
422:       &mat_mkl_pardiso->nrhs,
423:       mat_mkl_pardiso->iparm,
424:       &mat_mkl_pardiso->msglvl,
425:       (void*)xarray,
426:       (void*)work,
427:       &mat_mkl_pardiso->err);
428:     if (!mat_mkl_pardiso->schur_work) {
429:       PetscFree(work);
430:     }
431:   } else {
432:     mat_mkl_pardiso->iparm[6-1] = 0;
433:     MKL_PARDISO (mat_mkl_pardiso->pt,
434:       &mat_mkl_pardiso->maxfct,
435:       &mat_mkl_pardiso->mnum,
436:       &mat_mkl_pardiso->mtype,
437:       &mat_mkl_pardiso->phase,
438:       &mat_mkl_pardiso->n,
439:       mat_mkl_pardiso->a,
440:       mat_mkl_pardiso->ia,
441:       mat_mkl_pardiso->ja,
442:       mat_mkl_pardiso->perm,
443:       &mat_mkl_pardiso->nrhs,
444:       mat_mkl_pardiso->iparm,
445:       &mat_mkl_pardiso->msglvl,
446:       (void*)barray,
447:       (void*)xarray,
448:       &mat_mkl_pardiso->err);
449:   }
450:   VecRestoreArrayRead(b,&barray);


454:   if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
455:     if (!mat_mkl_pardiso->solve_interior) {
456:       PetscInt shift = mat_mkl_pardiso->schur_size;

458:       MatFactorFactorizeSchurComplement(A);
459:       /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
460:       if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;

462:       /* solve Schur complement */
463:       MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso,xarray,mat_mkl_pardiso->schur_work,PETSC_TRUE);
464:       MatMKLPardisoSolveSchur_Private(A,mat_mkl_pardiso->schur_work,mat_mkl_pardiso->schur_work+shift);
465:       MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso,xarray,mat_mkl_pardiso->schur_work+shift,PETSC_FALSE);
466:     } else { /* if we are solving for the interior problem, any value in barray[schur] forward-substituted to xarray[schur] will be neglected */
467:       PetscInt i;
468:       for (i=0;i<mat_mkl_pardiso->schur_size;i++) {
469:         xarray[mat_mkl_pardiso->schur_idxs[i]] = 0.;
470:       }
471:     }

473:     /* expansion phase */
474:     mat_mkl_pardiso->iparm[6-1] = 1;
475:     mat_mkl_pardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
476:     MKL_PARDISO (mat_mkl_pardiso->pt,
477:       &mat_mkl_pardiso->maxfct,
478:       &mat_mkl_pardiso->mnum,
479:       &mat_mkl_pardiso->mtype,
480:       &mat_mkl_pardiso->phase,
481:       &mat_mkl_pardiso->n,
482:       mat_mkl_pardiso->a,
483:       mat_mkl_pardiso->ia,
484:       mat_mkl_pardiso->ja,
485:       mat_mkl_pardiso->perm,
486:       &mat_mkl_pardiso->nrhs,
487:       mat_mkl_pardiso->iparm,
488:       &mat_mkl_pardiso->msglvl,
489:       (void*)xarray,
490:       (void*)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
491:       &mat_mkl_pardiso->err);

494:     mat_mkl_pardiso->iparm[6-1] = 0;
495:   }
496:   VecRestoreArrayWrite(x,&xarray);
497:   mat_mkl_pardiso->CleanUp = PETSC_TRUE;
498:   return 0;
499: }

501: PetscErrorCode MatSolveTranspose_MKL_PARDISO(Mat A,Vec b,Vec x)
502: {
503:   Mat_MKL_PARDISO *mat_mkl_pardiso=(Mat_MKL_PARDISO*)A->data;
504:   PetscInt        oiparm12;

506:   oiparm12 = mat_mkl_pardiso->iparm[12 - 1];
507:   mat_mkl_pardiso->iparm[12 - 1] = 2;
508:   MatSolve_MKL_PARDISO(A,b,x);
509:   mat_mkl_pardiso->iparm[12 - 1] = oiparm12;
510:   return 0;
511: }

513: PetscErrorCode MatMatSolve_MKL_PARDISO(Mat A,Mat B,Mat X)
514: {
515:   Mat_MKL_PARDISO   *mat_mkl_pardiso=(Mat_MKL_PARDISO*)(A)->data;
516:   const PetscScalar *barray;
517:   PetscScalar       *xarray;
518:   PetscBool         flg;

520:   PetscObjectBaseTypeCompare((PetscObject)B,MATSEQDENSE,&flg);
522:   if (X != B) {
523:     PetscObjectBaseTypeCompare((PetscObject)X,MATSEQDENSE,&flg);
525:   }

527:   MatGetSize(B,NULL,(PetscInt*)&mat_mkl_pardiso->nrhs);

529:   if (mat_mkl_pardiso->nrhs > 0) {
530:     MatDenseGetArrayRead(B,&barray);
531:     MatDenseGetArrayWrite(X,&xarray);

534:     if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
535:     else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;

537:     MKL_PARDISO (mat_mkl_pardiso->pt,
538:       &mat_mkl_pardiso->maxfct,
539:       &mat_mkl_pardiso->mnum,
540:       &mat_mkl_pardiso->mtype,
541:       &mat_mkl_pardiso->phase,
542:       &mat_mkl_pardiso->n,
543:       mat_mkl_pardiso->a,
544:       mat_mkl_pardiso->ia,
545:       mat_mkl_pardiso->ja,
546:       mat_mkl_pardiso->perm,
547:       &mat_mkl_pardiso->nrhs,
548:       mat_mkl_pardiso->iparm,
549:       &mat_mkl_pardiso->msglvl,
550:       (void*)barray,
551:       (void*)xarray,
552:       &mat_mkl_pardiso->err);

555:     MatDenseRestoreArrayRead(B,&barray);
556:     if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
557:       PetscScalar *o_schur_work = NULL;

559:       /* solve Schur complement */
560:       if (!mat_mkl_pardiso->solve_interior) {
561:         PetscInt shift = mat_mkl_pardiso->schur_size*mat_mkl_pardiso->nrhs,scale;
562:         PetscInt mem = mat_mkl_pardiso->n*mat_mkl_pardiso->nrhs;

564:         MatFactorFactorizeSchurComplement(A);
565:         /* allocate extra memory if it is needed */
566:         scale = 1;
567:         if (A->schur_status == MAT_FACTOR_SCHUR_INVERTED) scale = 2;
568:         mem *= scale;
569:         if (mem > mat_mkl_pardiso->schur_work_size) {
570:           o_schur_work = mat_mkl_pardiso->schur_work;
571:           PetscMalloc1(mem,&mat_mkl_pardiso->schur_work);
572:         }
573:         /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
574:         if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;
575:         MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso,xarray,mat_mkl_pardiso->schur_work,PETSC_TRUE);
576:         MatMKLPardisoSolveSchur_Private(A,mat_mkl_pardiso->schur_work,mat_mkl_pardiso->schur_work+shift);
577:         MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso,xarray,mat_mkl_pardiso->schur_work+shift,PETSC_FALSE);
578:       } else { /* if we are solving for the interior problem, any value in barray[schur,n] forward-substituted to xarray[schur,n] will be neglected */
579:         PetscInt i,n,m=0;
580:         for (n=0;n<mat_mkl_pardiso->nrhs;n++) {
581:           for (i=0;i<mat_mkl_pardiso->schur_size;i++) {
582:             xarray[mat_mkl_pardiso->schur_idxs[i]+m] = 0.;
583:           }
584:           m += mat_mkl_pardiso->n;
585:         }
586:       }

588:       /* expansion phase */
589:       mat_mkl_pardiso->iparm[6-1] = 1;
590:       mat_mkl_pardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
591:       MKL_PARDISO (mat_mkl_pardiso->pt,
592:         &mat_mkl_pardiso->maxfct,
593:         &mat_mkl_pardiso->mnum,
594:         &mat_mkl_pardiso->mtype,
595:         &mat_mkl_pardiso->phase,
596:         &mat_mkl_pardiso->n,
597:         mat_mkl_pardiso->a,
598:         mat_mkl_pardiso->ia,
599:         mat_mkl_pardiso->ja,
600:         mat_mkl_pardiso->perm,
601:         &mat_mkl_pardiso->nrhs,
602:         mat_mkl_pardiso->iparm,
603:         &mat_mkl_pardiso->msglvl,
604:         (void*)xarray,
605:         (void*)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
606:         &mat_mkl_pardiso->err);
607:       if (o_schur_work) { /* restore original schur_work (minimal size) */
608:         PetscFree(mat_mkl_pardiso->schur_work);
609:         mat_mkl_pardiso->schur_work = o_schur_work;
610:       }
612:       mat_mkl_pardiso->iparm[6-1] = 0;
613:     }
614:     MatDenseRestoreArrayWrite(X,&xarray);
615:   }
616:   mat_mkl_pardiso->CleanUp = PETSC_TRUE;
617:   return 0;
618: }
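
A brief caller-side sketch of the multiple right-hand side path above (editorial, not from the PETSc sources; error checking omitted), assuming F is an n-by-n factored matrix obtained with this solver and nrhs is the number of right-hand sides:

  Mat B, X;

  MatCreateSeqDense(PETSC_COMM_SELF, n, nrhs, NULL, &B);
  MatCreateSeqDense(PETSC_COMM_SELF, n, nrhs, NULL, &X);
  /* ... fill the columns of B ... */
  MatMatSolve(F, B, X);   /* dispatches to MatMatSolve_MKL_PARDISO() */
  MatDestroy(&B); MatDestroy(&X);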

620: PetscErrorCode MatFactorNumeric_MKL_PARDISO(Mat F,Mat A,const MatFactorInfo *info)
621: {
622:   Mat_MKL_PARDISO *mat_mkl_pardiso=(Mat_MKL_PARDISO*)(F)->data;

624:   mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
625:   (*mat_mkl_pardiso->Convert)(A,mat_mkl_pardiso->needsym,MAT_REUSE_MATRIX,&mat_mkl_pardiso->freeaij,&mat_mkl_pardiso->nz,&mat_mkl_pardiso->ia,&mat_mkl_pardiso->ja,(PetscScalar**)&mat_mkl_pardiso->a);

627:   mat_mkl_pardiso->phase = JOB_NUMERICAL_FACTORIZATION;
628:   MKL_PARDISO (mat_mkl_pardiso->pt,
629:     &mat_mkl_pardiso->maxfct,
630:     &mat_mkl_pardiso->mnum,
631:     &mat_mkl_pardiso->mtype,
632:     &mat_mkl_pardiso->phase,
633:     &mat_mkl_pardiso->n,
634:     mat_mkl_pardiso->a,
635:     mat_mkl_pardiso->ia,
636:     mat_mkl_pardiso->ja,
637:     mat_mkl_pardiso->perm,
638:     &mat_mkl_pardiso->nrhs,
639:     mat_mkl_pardiso->iparm,
640:     &mat_mkl_pardiso->msglvl,
641:     NULL,
642:     (void*)mat_mkl_pardiso->schur,
643:     &mat_mkl_pardiso->err);

646:   /* report flops */
647:   if (mat_mkl_pardiso->iparm[18] > 0) {
648:     PetscLogFlops(PetscPowRealInt(10.,6)*mat_mkl_pardiso->iparm[18]);
649:   }

651:   if (F->schur) { /* schur output from pardiso is in row major format */
652: #if defined(PETSC_HAVE_CUDA)
653:     F->schur->offloadmask = PETSC_OFFLOAD_CPU;
654: #endif
655:     MatFactorRestoreSchurComplement(F,NULL,MAT_FACTOR_SCHUR_UNFACTORED);
656:     MatTranspose(F->schur,MAT_INPLACE_MATRIX,&F->schur);
657:   }
658:   mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
659:   mat_mkl_pardiso->CleanUp  = PETSC_TRUE;
660:   return 0;
661: }

663: PetscErrorCode PetscSetMKL_PARDISOFromOptions(Mat F, Mat A)
664: {
665:   Mat_MKL_PARDISO     *mat_mkl_pardiso = (Mat_MKL_PARDISO*)F->data;
666:   PetscErrorCode      ierr;
667:   PetscInt            icntl,bs,threads=1;
668:   PetscBool           flg;

670:   PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MKL_PARDISO Options","Mat");

672:   PetscOptionsInt("-mat_mkl_pardiso_65","Suggested number of threads to use within PARDISO","None",threads,&threads,&flg);
673:   if (flg) PetscSetMKL_PARDISOThreads((int)threads);

675:   PetscOptionsInt("-mat_mkl_pardiso_66","Maximum number of factors with identical sparsity structure that must be kept in memory at the same time","None",mat_mkl_pardiso->maxfct,&icntl,&flg);
676:   if (flg) mat_mkl_pardiso->maxfct = icntl;

678:   PetscOptionsInt("-mat_mkl_pardiso_67","Indicates the actual matrix for the solution phase","None",mat_mkl_pardiso->mnum,&icntl,&flg);
679:   if (flg) mat_mkl_pardiso->mnum = icntl;

681:   PetscOptionsInt("-mat_mkl_pardiso_68","Message level information","None",mat_mkl_pardiso->msglvl,&icntl,&flg);
682:   if (flg) mat_mkl_pardiso->msglvl = icntl;

684:   PetscOptionsInt("-mat_mkl_pardiso_69","Defines the matrix type","None",mat_mkl_pardiso->mtype,&icntl,&flg);
685:   if (flg) {
686:     void *pt[IPARM_SIZE];
687:     mat_mkl_pardiso->mtype = icntl;
688:     icntl = mat_mkl_pardiso->iparm[34];
689:     bs = mat_mkl_pardiso->iparm[36];
690:     MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
691: #if defined(PETSC_USE_REAL_SINGLE)
692:     mat_mkl_pardiso->iparm[27] = 1;
693: #else
694:     mat_mkl_pardiso->iparm[27] = 0;
695: #endif
696:     mat_mkl_pardiso->iparm[34] = icntl;
697:     mat_mkl_pardiso->iparm[36] = bs;
698:   }

700:   PetscOptionsInt("-mat_mkl_pardiso_1","Use default values (if 0)","None",mat_mkl_pardiso->iparm[0],&icntl,&flg);
701:   if (flg) mat_mkl_pardiso->iparm[0] = icntl;

703:   PetscOptionsInt("-mat_mkl_pardiso_2","Fill-in reducing ordering for the input matrix","None",mat_mkl_pardiso->iparm[1],&icntl,&flg);
704:   if (flg) mat_mkl_pardiso->iparm[1] = icntl;

706:   PetscOptionsInt("-mat_mkl_pardiso_4","Preconditioned CGS/CG","None",mat_mkl_pardiso->iparm[3],&icntl,&flg);
707:   if (flg) mat_mkl_pardiso->iparm[3] = icntl;

709:   PetscOptionsInt("-mat_mkl_pardiso_5","User permutation","None",mat_mkl_pardiso->iparm[4],&icntl,&flg);
710:   if (flg) mat_mkl_pardiso->iparm[4] = icntl;

712:   PetscOptionsInt("-mat_mkl_pardiso_6","Write solution on x","None",mat_mkl_pardiso->iparm[5],&icntl,&flg);
713:   if (flg) mat_mkl_pardiso->iparm[5] = icntl;

715:   PetscOptionsInt("-mat_mkl_pardiso_8","Iterative refinement step","None",mat_mkl_pardiso->iparm[7],&icntl,&flg);
716:   if (flg) mat_mkl_pardiso->iparm[7] = icntl;

718:   PetscOptionsInt("-mat_mkl_pardiso_10","Pivoting perturbation","None",mat_mkl_pardiso->iparm[9],&icntl,&flg);
719:   if (flg) mat_mkl_pardiso->iparm[9] = icntl;

721:   PetscOptionsInt("-mat_mkl_pardiso_11","Scaling vectors","None",mat_mkl_pardiso->iparm[10],&icntl,&flg);
722:   if (flg) mat_mkl_pardiso->iparm[10] = icntl;

724:   PetscOptionsInt("-mat_mkl_pardiso_12","Solve with transposed or conjugate transposed matrix A","None",mat_mkl_pardiso->iparm[11],&icntl,&flg);
725:   if (flg) mat_mkl_pardiso->iparm[11] = icntl;

727:   PetscOptionsInt("-mat_mkl_pardiso_13","Improved accuracy using (non-) symmetric weighted matching","None",mat_mkl_pardiso->iparm[12],&icntl,&flg);
728:   if (flg) mat_mkl_pardiso->iparm[12] = icntl;

730:   PetscOptionsInt("-mat_mkl_pardiso_18","Numbers of non-zero elements","None",mat_mkl_pardiso->iparm[17],&icntl,&flg);
731:   if (flg) mat_mkl_pardiso->iparm[17] = icntl;

733:   PetscOptionsInt("-mat_mkl_pardiso_19","Report number of floating point operations (0 to disable)","None",mat_mkl_pardiso->iparm[18],&icntl,&flg);
734:   if (flg) mat_mkl_pardiso->iparm[18] = icntl;

736:   PetscOptionsInt("-mat_mkl_pardiso_21","Pivoting for symmetric indefinite matrices","None",mat_mkl_pardiso->iparm[20],&icntl,&flg);
737:   if (flg) mat_mkl_pardiso->iparm[20] = icntl;

739:   PetscOptionsInt("-mat_mkl_pardiso_24","Parallel factorization control","None",mat_mkl_pardiso->iparm[23],&icntl,&flg);
740:   if (flg) mat_mkl_pardiso->iparm[23] = icntl;

742:   PetscOptionsInt("-mat_mkl_pardiso_25","Parallel forward/backward solve control","None",mat_mkl_pardiso->iparm[24],&icntl,&flg);
743:   if (flg) mat_mkl_pardiso->iparm[24] = icntl;

745:   PetscOptionsInt("-mat_mkl_pardiso_27","Matrix checker","None",mat_mkl_pardiso->iparm[26],&icntl,&flg);
746:   if (flg) mat_mkl_pardiso->iparm[26] = icntl;

748:   PetscOptionsInt("-mat_mkl_pardiso_31","Partial solve and computing selected components of the solution vectors","None",mat_mkl_pardiso->iparm[30],&icntl,&flg);
749:   if (flg) mat_mkl_pardiso->iparm[30] = icntl;

751:   PetscOptionsInt("-mat_mkl_pardiso_34","Optimal number of threads for conditional numerical reproducibility (CNR) mode","None",mat_mkl_pardiso->iparm[33],&icntl,&flg);
752:   if (flg) mat_mkl_pardiso->iparm[33] = icntl;

754:   PetscOptionsInt("-mat_mkl_pardiso_60","Intel MKL_PARDISO mode","None",mat_mkl_pardiso->iparm[59],&icntl,&flg);
755:   if (flg) mat_mkl_pardiso->iparm[59] = icntl;
756:   PetscOptionsEnd();
757:   return 0;
758: }
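
As an editorial sketch (not part of this file), the same options can be placed in the options database programmatically before the symbolic factorization, which is when PetscSetMKL_PARDISOFromOptions() reads them; this assumes the matrix has no options prefix:

  PetscOptionsSetValue(NULL, "-mat_mkl_pardiso_65", "4");   /* suggested number of threads */
  PetscOptionsSetValue(NULL, "-mat_mkl_pardiso_68", "1");   /* message level information */
  PetscOptionsSetValue(NULL, "-mat_mkl_pardiso_8",  "2");   /* maps to iparm[7]: iterative refinement steps */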

760: PetscErrorCode MatFactorMKL_PARDISOInitialize_Private(Mat A, MatFactorType ftype, Mat_MKL_PARDISO *mat_mkl_pardiso)
761: {
762:   PetscInt       i,bs;
763:   PetscBool      match;

765:   for (i=0; i<IPARM_SIZE; i++) mat_mkl_pardiso->iparm[i] = 0;
766:   for (i=0; i<IPARM_SIZE; i++) mat_mkl_pardiso->pt[i] = 0;
767: #if defined(PETSC_USE_REAL_SINGLE)
768:   mat_mkl_pardiso->iparm[27] = 1;
769: #else
770:   mat_mkl_pardiso->iparm[27] = 0;
771: #endif
772:   /* Default options for both symmetric and unsymmetric factorizations */
773:   mat_mkl_pardiso->iparm[ 0] =  1; /* Solver default parameters overridden with those provided in iparm */
774:   mat_mkl_pardiso->iparm[ 1] =  2; /* Metis reordering */
775:   mat_mkl_pardiso->iparm[ 5] =  0; /* Write solution into x */
776:   mat_mkl_pardiso->iparm[ 7] =  0; /* Max number of iterative refinement steps */
777:   mat_mkl_pardiso->iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
778:   mat_mkl_pardiso->iparm[18] = -1; /* Output: Mflops for LU factorization */
779: #if 0
780:   mat_mkl_pardiso->iparm[23] =  1; /* Parallel factorization control*/
781: #endif
782:   PetscObjectTypeCompareAny((PetscObject)A,&match,MATSEQBAIJ,MATSEQSBAIJ,"");
783:   MatGetBlockSize(A,&bs);
784:   if (!match || bs == 1) {
785:     mat_mkl_pardiso->iparm[34] = 1; /* use C-style (zero-based) indexing for the ia and ja arrays */
786:     mat_mkl_pardiso->n         = A->rmap->N;
787:   } else {
788:     mat_mkl_pardiso->iparm[34] = 0; /* use Fortran-style (one-based) indexing for the ia and ja arrays */
789:     mat_mkl_pardiso->iparm[36] = bs;
790:     mat_mkl_pardiso->n         = A->rmap->N/bs;
791:   }
792:   mat_mkl_pardiso->iparm[39] =  0; /* Input: matrix/rhs/solution stored on rank-0 */

794:   mat_mkl_pardiso->CleanUp   = PETSC_FALSE;
795:   mat_mkl_pardiso->maxfct    = 1; /* Maximum number of numerical factorizations. */
796:   mat_mkl_pardiso->mnum      = 1; /* Which factorization to use. */
797:   mat_mkl_pardiso->msglvl    = 0; /* 0: do not print; 1: print statistical information to file */
798:   mat_mkl_pardiso->phase     = -1;
799:   mat_mkl_pardiso->err       = 0;

801:   mat_mkl_pardiso->nrhs      = 1;
802:   mat_mkl_pardiso->err       = 0;
803:   mat_mkl_pardiso->phase     = -1;

805:   if (ftype == MAT_FACTOR_LU) {
806:     mat_mkl_pardiso->iparm[ 9] = 13; /* Perturb the pivot elements with 1E-13 */
807:     mat_mkl_pardiso->iparm[10] =  1; /* Use nonsymmetric permutation and scaling MPS */
808:     mat_mkl_pardiso->iparm[12] =  1; /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
809:   } else {
810:     mat_mkl_pardiso->iparm[ 9] = 8; /* Perturb the pivot elements with 1E-8 */
811:     mat_mkl_pardiso->iparm[10] = 0; /* Do not use nonsymmetric permutation and scaling MPS */
812:     mat_mkl_pardiso->iparm[12] = 1; /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
813: #if defined(PETSC_USE_DEBUG)
814:     mat_mkl_pardiso->iparm[26] = 1; /* Matrix checker */
815: #endif
816:   }
817:   PetscCalloc1(A->rmap->N*sizeof(INT_TYPE), &mat_mkl_pardiso->perm);
818:   mat_mkl_pardiso->schur_size = 0;
819:   return 0;
820: }

822: PetscErrorCode MatFactorSymbolic_AIJMKL_PARDISO_Private(Mat F,Mat A,const MatFactorInfo *info)
823: {
824:   Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO*)F->data;

826:   mat_mkl_pardiso->matstruc = DIFFERENT_NONZERO_PATTERN;
827:   PetscSetMKL_PARDISOFromOptions(F,A);
828:   /* throw away any previously computed structure */
829:   if (mat_mkl_pardiso->freeaij) {
830:     PetscFree2(mat_mkl_pardiso->ia,mat_mkl_pardiso->ja);
831:     if (mat_mkl_pardiso->iparm[34] == 1) {
832:       PetscFree(mat_mkl_pardiso->a);
833:     }
834:   }
835:   (*mat_mkl_pardiso->Convert)(A,mat_mkl_pardiso->needsym,MAT_INITIAL_MATRIX,&mat_mkl_pardiso->freeaij,&mat_mkl_pardiso->nz,&mat_mkl_pardiso->ia,&mat_mkl_pardiso->ja,(PetscScalar**)&mat_mkl_pardiso->a);
836:   if (mat_mkl_pardiso->iparm[34] == 1) mat_mkl_pardiso->n = A->rmap->N;
837:   else mat_mkl_pardiso->n = A->rmap->N/A->rmap->bs;

839:   mat_mkl_pardiso->phase = JOB_ANALYSIS;

841:   /* reset flops counting if requested */
842:   if (mat_mkl_pardiso->iparm[18]) mat_mkl_pardiso->iparm[18] = -1;

844:   MKL_PARDISO (mat_mkl_pardiso->pt,
845:     &mat_mkl_pardiso->maxfct,
846:     &mat_mkl_pardiso->mnum,
847:     &mat_mkl_pardiso->mtype,
848:     &mat_mkl_pardiso->phase,
849:     &mat_mkl_pardiso->n,
850:     mat_mkl_pardiso->a,
851:     mat_mkl_pardiso->ia,
852:     mat_mkl_pardiso->ja,
853:     mat_mkl_pardiso->perm,
854:     &mat_mkl_pardiso->nrhs,
855:     mat_mkl_pardiso->iparm,
856:     &mat_mkl_pardiso->msglvl,
857:     NULL,
858:     NULL,
859:     &mat_mkl_pardiso->err);

862:   mat_mkl_pardiso->CleanUp = PETSC_TRUE;

864:   if (F->factortype == MAT_FACTOR_LU) F->ops->lufactornumeric = MatFactorNumeric_MKL_PARDISO;
865:   else F->ops->choleskyfactornumeric = MatFactorNumeric_MKL_PARDISO;

867:   F->ops->solve           = MatSolve_MKL_PARDISO;
868:   F->ops->solvetranspose  = MatSolveTranspose_MKL_PARDISO;
869:   F->ops->matsolve        = MatMatSolve_MKL_PARDISO;
870:   return 0;
871: }

873: PetscErrorCode MatLUFactorSymbolic_AIJMKL_PARDISO(Mat F,Mat A,IS r,IS c,const MatFactorInfo *info)
874: {
875:   MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info);
876:   return 0;
877: }

879: #if !defined(PETSC_USE_COMPLEX)
880: PetscErrorCode MatGetInertia_MKL_PARDISO(Mat F,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
881: {
882:   Mat_MKL_PARDISO   *mat_mkl_pardiso=(Mat_MKL_PARDISO*)F->data;

884:   if (nneg) *nneg = mat_mkl_pardiso->iparm[22];
885:   if (npos) *npos = mat_mkl_pardiso->iparm[21];
886:   if (nzero) *nzero = F->rmap->N - (mat_mkl_pardiso->iparm[22] + mat_mkl_pardiso->iparm[21]);
887:   return 0;
888: }
889: #endif

891: PetscErrorCode MatCholeskyFactorSymbolic_AIJMKL_PARDISO(Mat F,Mat A,IS r,const MatFactorInfo *info)
892: {
893:   MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info);
894: #if defined(PETSC_USE_COMPLEX)
895:   F->ops->getinertia = NULL;
896: #else
897:   F->ops->getinertia = MatGetInertia_MKL_PARDISO;
898: #endif
899:   return 0;
900: }
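
A short caller-side sketch (editorial, not from the PETSc sources; error checking omitted) of a Cholesky factorization with this solver followed by an inertia query; A is assumed to be a symmetric sequential matrix and the build is assumed real, since MatGetInertia_MKL_PARDISO() is compiled out for complex scalars:

  Mat           F;
  IS            perm, iperm;
  MatFactorInfo info;
  PetscInt      nneg, nzero, npos;

  MatGetFactor(A, MATSOLVERMKL_PARDISO, MAT_FACTOR_CHOLESKY, &F);
  MatGetOrdering(A, MATORDERINGNATURAL, &perm, &iperm);
  MatFactorInfoInitialize(&info);
  MatCholeskyFactorSymbolic(F, A, perm, &info);
  MatCholeskyFactorNumeric(F, A, &info);
  MatGetInertia(F, &nneg, &nzero, &npos);   /* filled from iparm[21] and iparm[22] as above */
  ISDestroy(&perm); ISDestroy(&iperm); MatDestroy(&F);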

902: PetscErrorCode MatView_MKL_PARDISO(Mat A, PetscViewer viewer)
903: {
904:   PetscBool         iascii;
905:   PetscViewerFormat format;
906:   Mat_MKL_PARDISO   *mat_mkl_pardiso=(Mat_MKL_PARDISO*)A->data;
907:   PetscInt          i;

909:   if (A->ops->solve != MatSolve_MKL_PARDISO) return 0;

911:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
912:   if (iascii) {
913:     PetscViewerGetFormat(viewer,&format);
914:     if (format == PETSC_VIEWER_ASCII_INFO) {
915:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO run parameters:\n");
916:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO phase:             %d \n",mat_mkl_pardiso->phase);
917:       for (i=1; i<=64; i++) {
918:         PetscViewerASCIIPrintf(viewer,"MKL_PARDISO iparm[%d]:     %d \n",i, mat_mkl_pardiso->iparm[i - 1]);
919:       }
920:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO maxfct:     %d \n", mat_mkl_pardiso->maxfct);
921:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO mnum:     %d \n", mat_mkl_pardiso->mnum);
922:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO mtype:     %d \n", mat_mkl_pardiso->mtype);
923:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO n:     %d \n", mat_mkl_pardiso->n);
924:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO nrhs:     %d \n", mat_mkl_pardiso->nrhs);
925:       PetscViewerASCIIPrintf(viewer,"MKL_PARDISO msglvl:     %d \n", mat_mkl_pardiso->msglvl);
926:     }
927:   }
928:   return 0;
929: }

931: PetscErrorCode MatGetInfo_MKL_PARDISO(Mat A, MatInfoType flag, MatInfo *info)
932: {
933:   Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO*)A->data;

935:   info->block_size        = 1.0;
936:   info->nz_used           = mat_mkl_pardiso->iparm[17];
937:   info->nz_allocated      = mat_mkl_pardiso->iparm[17];
938:   info->nz_unneeded       = 0.0;
939:   info->assemblies        = 0.0;
940:   info->mallocs           = 0.0;
941:   info->memory            = 0.0;
942:   info->fill_ratio_given  = 0;
943:   info->fill_ratio_needed = 0;
944:   info->factor_mallocs    = 0;
945:   return 0;
946: }

948: PetscErrorCode MatMkl_PardisoSetCntl_MKL_PARDISO(Mat F,PetscInt icntl,PetscInt ival)
949: {
950:   PetscInt        backup,bs;
951:   Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO*)F->data;

953:   if (icntl <= 64) {
954:     mat_mkl_pardiso->iparm[icntl - 1] = ival;
955:   } else {
956:     if (icntl == 65) PetscSetMKL_PARDISOThreads(ival);
957:     else if (icntl == 66) mat_mkl_pardiso->maxfct = ival;
958:     else if (icntl == 67) mat_mkl_pardiso->mnum = ival;
959:     else if (icntl == 68) mat_mkl_pardiso->msglvl = ival;
960:     else if (icntl == 69) {
961:       void *pt[IPARM_SIZE];
962:       backup = mat_mkl_pardiso->iparm[34];
963:       bs = mat_mkl_pardiso->iparm[36];
964:       mat_mkl_pardiso->mtype = ival;
965:       MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
966: #if defined(PETSC_USE_REAL_SINGLE)
967:       mat_mkl_pardiso->iparm[27] = 1;
968: #else
969:       mat_mkl_pardiso->iparm[27] = 0;
970: #endif
971:       mat_mkl_pardiso->iparm[34] = backup;
972:       mat_mkl_pardiso->iparm[36] = bs;
973:     } else if (icntl==70) mat_mkl_pardiso->solve_interior = (PetscBool)!!ival;
974:   }
975:   return 0;
976: }

978: /*@
979:   MatMkl_PardisoSetCntl - Set Mkl_Pardiso parameters

981:    Logically Collective on Mat

983:    Input Parameters:
984: +  F - the factored matrix obtained by calling MatGetFactor()
985: .  icntl - index of Mkl_Pardiso parameter
986: -  ival - value of Mkl_Pardiso parameter

988:   Options Database:
989: .   -mat_mkl_pardiso_<icntl> <ival> - change the option numbered icntl to the value ival

991:    Level: beginner

993:    References:
994: .  * - Mkl_Pardiso Users' Guide

996: .seealso: MatGetFactor()
997: @*/
998: PetscErrorCode MatMkl_PardisoSetCntl(Mat F,PetscInt icntl,PetscInt ival)
999: {
1000:   PetscTryMethod(F,"MatMkl_PardisoSetCntl_C",(Mat,PetscInt,PetscInt),(F,icntl,ival));
1001:   return 0;
1002: }
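
A short usage sketch (editorial, not from the PETSc sources; error checking omitted), assuming F was obtained with MatGetFactor(A,MATSOLVERMKL_PARDISO,MAT_FACTOR_LU,&F):

  MatMkl_PardisoSetCntl(F, 65, 4);   /* icntl 65: suggested number of threads */
  MatMkl_PardisoSetCntl(F, 68, 1);   /* icntl 68: message level information */
  MatMkl_PardisoSetCntl(F, 8, 2);    /* icntl <= 64 map directly to iparm[icntl-1]; here, two iterative refinement steps */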

1004: /*MC
1005:   MATSOLVERMKL_PARDISO -  A matrix type providing direct solvers (LU and Cholesky) for
1006:   sequential matrices via the external package MKL_PARDISO.

1008:   Works with MATSEQAIJ matrices (and with MATSEQBAIJ for LU, MATSEQSBAIJ for Cholesky)

1010:   Use -pc_type lu -pc_factor_mat_solver_type mkl_pardiso to use this direct solver

1012:   Options Database Keys:
1013: + -mat_mkl_pardiso_65 - Suggested number of threads to use within MKL_PARDISO
1014: . -mat_mkl_pardiso_66 - Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
1015: . -mat_mkl_pardiso_67 - Indicates the actual matrix for the solution phase
1016: . -mat_mkl_pardiso_68 - Message level information, use 1 to get detailed information on the solver options
1017: . -mat_mkl_pardiso_69 - Defines the matrix type. IMPORTANT: When you set this flag, iparm parameters are going to be set to the default ones for the matrix type
1018: . -mat_mkl_pardiso_1  - Use default values
1019: . -mat_mkl_pardiso_2  - Fill-in reducing ordering for the input matrix
1020: . -mat_mkl_pardiso_4  - Preconditioned CGS/CG
1021: . -mat_mkl_pardiso_5  - User permutation
1022: . -mat_mkl_pardiso_6  - Write solution on x
1023: . -mat_mkl_pardiso_8  - Iterative refinement step
1024: . -mat_mkl_pardiso_10 - Pivoting perturbation
1025: . -mat_mkl_pardiso_11 - Scaling vectors
1026: . -mat_mkl_pardiso_12 - Solve with transposed or conjugate transposed matrix A
1027: . -mat_mkl_pardiso_13 - Improved accuracy using (non-) symmetric weighted matching
1028: . -mat_mkl_pardiso_18 - Numbers of non-zero elements
1029: . -mat_mkl_pardiso_19 - Report number of floating point operations
1030: . -mat_mkl_pardiso_21 - Pivoting for symmetric indefinite matrices
1031: . -mat_mkl_pardiso_24 - Parallel factorization control
1032: . -mat_mkl_pardiso_25 - Parallel forward/backward solve control
1033: . -mat_mkl_pardiso_27 - Matrix checker
1034: . -mat_mkl_pardiso_31 - Partial solve and computing selected components of the solution vectors
1035: . -mat_mkl_pardiso_34 - Optimal number of threads for conditional numerical reproducibility (CNR) mode
1036: - -mat_mkl_pardiso_60 - Intel MKL_PARDISO mode

1038:   Level: beginner

1040:   Notes:
1041:     Use -mat_mkl_pardiso_68 1 to display the number of threads the solver is using. MKL does not provide a way to directly access this
1042:     information.

1044:     For more information on the options check the MKL_Pardiso manual

1046: .seealso: PCFactorSetMatSolverType(), MatSolverType

1048: M*/
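
The same selection can also be made in code rather than on the command line; the following is an editorial sketch (not part of this file; error checking omitted), assuming a sequential matrix A and vectors b and x:

  KSP ksp;
  PC  pc;

  KSPCreate(PETSC_COMM_SELF, &ksp);
  KSPSetOperators(ksp, A, A);
  KSPGetPC(ksp, &pc);
  PCSetType(pc, PCLU);                                /* direct solve */
  PCFactorSetMatSolverType(pc, MATSOLVERMKL_PARDISO); /* equivalent to -pc_factor_mat_solver_type mkl_pardiso */
  KSPSetFromOptions(ksp);                             /* -mat_mkl_pardiso_* options still apply */
  KSPSolve(ksp, b, x);
  KSPDestroy(&ksp);
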
1049: static PetscErrorCode MatFactorGetSolverType_mkl_pardiso(Mat A, MatSolverType *type)
1050: {
1051:   *type = MATSOLVERMKL_PARDISO;
1052:   return 0;
1053: }

1055: PETSC_EXTERN PetscErrorCode MatGetFactor_aij_mkl_pardiso(Mat A,MatFactorType ftype,Mat *F)
1056: {
1057:   Mat             B;
1058:   Mat_MKL_PARDISO *mat_mkl_pardiso;
1059:   PetscBool       isSeqAIJ,isSeqBAIJ,isSeqSBAIJ;

1061:   PetscObjectBaseTypeCompare((PetscObject)A,MATSEQAIJ,&isSeqAIJ);
1062:   PetscObjectTypeCompare((PetscObject)A,MATSEQBAIJ,&isSeqBAIJ);
1063:   PetscObjectTypeCompare((PetscObject)A,MATSEQSBAIJ,&isSeqSBAIJ);
1064:   MatCreate(PetscObjectComm((PetscObject)A),&B);
1065:   MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1066:   PetscStrallocpy("mkl_pardiso",&((PetscObject)B)->type_name);
1067:   MatSetUp(B);

1069:   PetscNewLog(B,&mat_mkl_pardiso);
1070:   B->data = mat_mkl_pardiso;

1072:   MatFactorMKL_PARDISOInitialize_Private(A, ftype, mat_mkl_pardiso);
1073:   if (ftype == MAT_FACTOR_LU) {
1074:     B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_PARDISO;
1075:     B->factortype            = MAT_FACTOR_LU;
1076:     mat_mkl_pardiso->needsym = PETSC_FALSE;
1077:     if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
1078:     else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
1080:     else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"No support for PARDISO LU with %s format",((PetscObject)A)->type_name);
1081: #if defined(PETSC_USE_COMPLEX)
1082:     mat_mkl_pardiso->mtype = 13;
1083: #else
1084:     mat_mkl_pardiso->mtype = 11;
1085: #endif
1086:   } else {
1087:     B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_AIJMKL_PARDISO;
1088:     B->factortype                  = MAT_FACTOR_CHOLESKY;
1089:     if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
1090:     else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
1091:     else if (isSeqSBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqsbaij;
1092:     else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"No support for PARDISO CHOLESKY with %s format",((PetscObject)A)->type_name);

1094:     mat_mkl_pardiso->needsym = PETSC_TRUE;
1095: #if !defined(PETSC_USE_COMPLEX)
1096:     if (A->spd_set && A->spd) mat_mkl_pardiso->mtype = 2;
1097:     else                      mat_mkl_pardiso->mtype = -2;
1098: #else
1099:     mat_mkl_pardiso->mtype = 6;
1101: #endif
1102:   }
1103:   B->ops->destroy = MatDestroy_MKL_PARDISO;
1104:   B->ops->view    = MatView_MKL_PARDISO;
1105:   B->ops->getinfo = MatGetInfo_MKL_PARDISO;
1106:   B->factortype   = ftype;
1107:   B->assembled    = PETSC_TRUE;

1109:   PetscFree(B->solvertype);
1110:   PetscStrallocpy(MATSOLVERMKL_PARDISO,&B->solvertype);

1112:   PetscObjectComposeFunction((PetscObject)B,"MatFactorGetSolverType_C",MatFactorGetSolverType_mkl_pardiso);
1113:   PetscObjectComposeFunction((PetscObject)B,"MatFactorSetSchurIS_C",MatFactorSetSchurIS_MKL_PARDISO);
1114:   PetscObjectComposeFunction((PetscObject)B,"MatMkl_PardisoSetCntl_C",MatMkl_PardisoSetCntl_MKL_PARDISO);

1116:   *F = B;
1117:   return 0;
1118: }

1120: PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_MKL_Pardiso(void)
1121: {
1122:   MatSolverTypeRegister(MATSOLVERMKL_PARDISO,MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_aij_mkl_pardiso);
1123:   MatSolverTypeRegister(MATSOLVERMKL_PARDISO,MATSEQAIJ,MAT_FACTOR_CHOLESKY,MatGetFactor_aij_mkl_pardiso);
1124:   MatSolverTypeRegister(MATSOLVERMKL_PARDISO,MATSEQBAIJ,MAT_FACTOR_LU,MatGetFactor_aij_mkl_pardiso);
1125:   MatSolverTypeRegister(MATSOLVERMKL_PARDISO,MATSEQSBAIJ,MAT_FACTOR_CHOLESKY,MatGetFactor_aij_mkl_pardiso);
1126:   return 0;
1127: }