Actual source code: ex42.c
/*$Id: ex42.c,v 1.25 2001/08/07 03:03:07 balay Exp $*/
static char help[] = "Tests MatIncreaseOverlap() and MatGetSubMatrices() for the parallel case.\n\
This example is similar to ex40.c; here the index sets used are random.\n\
Input arguments are:\n\
  -f <input_file> : file to load. For a 5X5 example of the 5-pt. stencil,\n\
                    use the file petsc/src/mat/examples/matbinary.ex\n\
  -nd <size>      : > 0, number of domains per processor\n\
  -ov <overlap>   : >= 0, amount of overlap between domains\n\n";
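
/*
   Illustrative invocation (the launcher name and matrix file are
   installation-dependent; the file below is the one suggested in the help
   text, and -nd/-ov values are arbitrary):

       mpirun -np 2 ./ex42 -f petsc/src/mat/examples/matbinary.ex -nd 4 -ov 2
*/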

#include "petscsles.h"

int main(int argc,char **args)
{
  int         ierr,nd = 2,ov = 1,i,j,size,m,n,rank,*idx;
  PetscTruth  flg;
  Mat         A,B,*submatA,*submatB;
  char        file[128];
  PetscViewer fd;
  IS          *is1,*is2;
  PetscRandom r;
  PetscScalar rand;

  PetscInitialize(&argc,&args,(char *)0,help);
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(1,"This example does not work with complex numbers");
#else

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscOptionsGetString(PETSC_NULL,"-f",file,127,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-nd",&nd,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-ov",&ov,PETSC_NULL);CHKERRQ(ierr);
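  /* nd and ov keep their defaults (2 domains per process, 1 level of overlap) unless overridden above */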

  /* Read the matrix as a parallel MPIAIJ matrix */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,PETSC_BINARY_RDONLY,&fd);CHKERRQ(ierr);
  ierr = MatLoad(fd,MATMPIAIJ,&A);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(fd);CHKERRQ(ierr);

  /* Read the matrix again as a sequential matrix */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,file,PETSC_BINARY_RDONLY,&fd);CHKERRQ(ierr);
  ierr = MatLoad(fd,MATSEQAIJ,&B);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(fd);CHKERRQ(ierr);
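  /* Every process now holds B, a complete sequential copy of the matrix, which
     serves as the reference answer for the parallel routines applied to A */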

  /* Create the random number generator */
  ierr = MatGetSize(A,&m,&n);CHKERRQ(ierr);
  ierr = PetscRandomCreate(PETSC_COMM_SELF,RANDOM_DEFAULT,&r);CHKERRQ(ierr);
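  /* m (from MatGetSize() above) is the global number of rows; it bounds both the
     size of each random subdomain and the row indices generated below */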

  /* Create the IS objects corresponding to the subdomains */
  ierr = PetscMalloc(nd*sizeof(IS),&is1);CHKERRQ(ierr);
  ierr = PetscMalloc(nd*sizeof(IS),&is2);CHKERRQ(ierr);
  ierr = PetscMalloc(m*sizeof(int),&idx);CHKERRQ(ierr);
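  /* idx is scratch space large enough for the row indices of any one subdomain (at most m entries) */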

  /* Create the random index sets */
  for (i=0; i<nd; i++) {
    /* Skip a few random values so that the index sets on different processes differ */
    for (j=0; j<rank; j++) {
      ierr = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
    }
    ierr = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
    size = (int)(rand*m);
    for (j=0; j<size; j++) {
      ierr = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
      idx[j] = (int)(rand*m);
    }
    ierr = PetscSortInt(size,idx);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,size,idx,is1+i);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,size,idx,is2+i);CHKERRQ(ierr);
  }
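
  /* Grow every subdomain by 'ov' levels of overlap, once through the parallel
     matrix A and once through the sequential copy B */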
  ierr = MatIncreaseOverlap(A,nd,is1,ov);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap(B,nd,is2,ov);CHKERRQ(ierr);
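
  /* The overlapped index sets may not come back sorted; sort both families so
     the parallel and sequential extractions see identical inputs */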
  for (i=0; i<nd; ++i) {
    ierr = ISSort(is1[i]);CHKERRQ(ierr);
    ierr = ISSort(is2[i]);CHKERRQ(ierr);
  }
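
  /* Extract one sequential submatrix per subdomain: submatA[] from the parallel
     matrix and submatB[] from the sequential copy */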
  ierr = MatGetSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);CHKERRQ(ierr);
  ierr = MatGetSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);CHKERRQ(ierr);

  /* Now see if the sequential and parallel cases give the same answers */
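  /* MatEqual() sets flg to PETSC_TRUE (printed as 1) exactly when the two
     submatrices for subdomain i match */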
  for (i=0; i<nd; ++i) {
    ierr = MatEqual(submatA[i],submatB[i],&flg);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF,"proc:[%d], i=%d, flg =%d\n",rank,i,flg);CHKERRQ(ierr);
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ierr = ISDestroy(is1[i]);CHKERRQ(ierr);
    ierr = ISDestroy(is2[i]);CHKERRQ(ierr);
    ierr = MatDestroy(submatA[i]);CHKERRQ(ierr);
    ierr = MatDestroy(submatB[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(submatA);CHKERRQ(ierr);
  ierr = PetscFree(submatB);CHKERRQ(ierr);
  ierr = PetscRandomDestroy(r);CHKERRQ(ierr);
  ierr = PetscFree(is1);CHKERRQ(ierr);
  ierr = PetscFree(is2);CHKERRQ(ierr);
  ierr = MatDestroy(A);CHKERRQ(ierr);
  ierr = MatDestroy(B);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);

  PetscFinalize();
#endif
  return 0;
}