Actual source code: ex2.c
/* Program usage:  mpiexec -n <procs> ex2 [-help] [all PETSc options] */
static char help[] = "Solves a linear system in parallel with KSP.\n\
Input parameters include:\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";
/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines   petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec            x,b,u;   /* approx solution, RHS, exact solution */
  Mat            A;       /* linear system matrix */
  KSP            ksp;     /* linear solver context */
  PetscRandom    rctx;    /* random number generator context */
  PetscReal      norm;    /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m = 8,n = 7,its;
  PetscBool      flg = PETSC_FALSE;
  PetscScalar    v;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);
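  /*
     For example, the mesh dimensions can be set on the command line
     (an illustrative invocation; the executable name and process count
     are arbitrary):
         mpiexec -n 4 ./ex2 -m 16 -n 16
  */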
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note:  For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,PETSC_NULL,5,PETSC_NULL);
  MatSeqAIJSetPreallocation(A,5,PETSC_NULL);
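  /*
     A sketch of tighter preallocation (not used here): pass per-row
     nonzero counts instead of the uniform estimate of 5 per row.  The
     arrays d_nnz/o_nnz below are hypothetical, holding the number of
     diagonal- and off-diagonal-block nonzeros for each locally owned row:
         MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);
  */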
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
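  /*
     For example (illustrative numbers only): with the default m = 8,
     n = 7 the matrix has 56 rows, so on 4 processes rank 0 owns rows
     0..13, rank 1 rows 14..27, rank 2 rows 28..41, and rank 3 rows 42..55.
  */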
  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect.  The more
     standard ordering would first do all variables for y = h, then
     y = 2h, etc.
  */
  PetscLogStageRegister("Assembly",&stage);
  PetscLogStagePush(stage);
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }
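  /*
     Each interior row assembled above is the standard five-point
     Laplacian stencil (unscaled; a physical discretization would
     divide by h^2):

              -1
          -1   4  -1
              -1
  */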
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
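  /*
     A sketch of overlapping local computation with assembly
     communication; do_local_work() is a hypothetical placeholder
     (not a PETSc routine) for work that needs no remote data:
         MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
         do_local_work();
         MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  */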
  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
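  /*
     With the symmetric flag set, a symmetric Krylov method and incomplete
     Cholesky can be selected at runtime, e.g. (illustrative; ICC is applied
     per-process, so in parallel it is combined with block Jacobi):
         mpiexec -n 4 ./ex2 -ksp_type cg -pc_type bjacobi -sub_pc_type icc
  */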
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
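  /*
     A sketch of the alternative mentioned above: give the local size
     explicitly and let PETSc deduce the global size.  Here nlocal is a
     hypothetical per-process count whose sum over all ranks must be m*n:
         VecSetSizes(u,nlocal,PETSC_DETERMINE);
  */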
  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution vector with all elements
     equal to 1.0; alternatively, the runtime option -random_exact_sol
     forms a solution vector with random components.
  */
  PetscOptionsGetBool(PETSC_NULL,"-random_exact_sol",&flg,PETSC_NULL);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.0);
  }
  MatMult(A,u,b);
  /*
     View the exact solution vector if desired
  */
  flg = PETSC_FALSE;
  PetscOptionsGetBool(PETSC_NULL,"-view_exact_sol",&flg,PETSC_NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}
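  /*
     A sketch of writing the vector to a file instead of stdout
     ("sol.out" is an illustrative filename):
         PetscViewer viewer;
         PetscViewerASCIIOpen(PETSC_COMM_WORLD,"sol.out",&viewer);
         VecView(u,viewer);
         PetscViewerDestroy(&viewer);
  */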
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
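  /*
     A sketch of supplying a separate preconditioning matrix; P is
     hypothetical, e.g. a cheaper approximation of A assembled elsewhere:
         KSPSetOperators(ksp,A,P,SAME_NONZERO_PATTERN);
  */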
  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options.
     - The following statement is optional; all of these
       parameters could alternatively be specified at runtime via
       KSPSetFromOptions().  All of these defaults can be
       overridden at runtime, as indicated below.
  */
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);
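  /*
     A sketch of the context extraction mentioned above: pull the PC out
     of the KSP and set its type directly (PCJACOBI is chosen only for
     illustration):
         PC pc;
         KSPGetPC(ksp,&pc);
         PCSetType(pc,PCJACOBI);
  */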
  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
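  /*
     For example (an illustrative invocation):
         mpiexec -n 4 ./ex2 -ksp_type gmres -pc_type bjacobi -ksp_monitor
  */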
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);
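  /*
     A sketch of checking why the iteration stopped (optional; the
     variable reason is introduced only for this example):
         KSPConvergedReason reason;
         KSPGetConvergedReason(ksp,&reason);
         if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"Solve diverged: %d\n",(int)reason);
  */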
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Check the error
  */
  VecAXPY(x,-1.0,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  /*
     Scale the norm, if desired: the commented-out line below weights the
     discrete 2-norm by the mesh spacing, approximating the continuum L2 norm.
  */
  /* norm *= sqrt(1.0/((m+1)*(n+1))); */
  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %A iterations %D\n",norm,its);

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);  VecDestroy(&x);
  VecDestroy(&b);  MatDestroy(&A);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}