      SUBROUTINE pcpttrs( UPLO, N, NRHS, D, E, JA, DESCA, B, IB, DESCB,
     $                    AF, LAF, WORK, LWORK, INFO )
*
*
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     August 7, 2001
*
*     .. Scalar Arguments ..
      CHARACTER          UPLO
      INTEGER            IB, INFO, JA, LAF, LWORK, N, NRHS
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), DESCB( * )
      COMPLEX            AF( * ), B( * ), E( * ), WORK( * )
      REAL               D( * )
*     ..
*
*
*  Purpose
*  =======
*
*  PCPTTRS solves a system of linear equations
*
*            A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
*
*  where A(1:N, JA:JA+N-1) is the matrix used to produce the factors
*  stored in A(1:N, JA:JA+N-1) and AF by PCPTTRF.
*  A(1:N, JA:JA+N-1) is an N-by-N complex
*  tridiagonal symmetric positive definite distributed
*  matrix.
*  Depending on the value of UPLO, A stores either U or L in the
*  equation A(1:N, JA:JA+N-1) = U'*D*U or L*D*L', as computed by
*  PCPTTRF.
*
*  Routine PCPTTRF MUST be called first.
*
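*  As an illustration, a minimal factor-then-solve sequence might look
*  as follows. This is a sketch only: it assumes PCPTTRF's interface
*  PCPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK, INFO ), and that
*  the BLACS grid, the descriptors, and the arrays have already been
*  set up as described under Arguments and Notes below:
*
*     CALL PCPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK, INFO )
*     IF( INFO.EQ.0 ) THEN
*        CALL PCPTTRS( 'L', N, NRHS, D, E, JA, DESCA, B, IB, DESCB,
*    $                 AF, LAF, WORK, LWORK, INFO )
*     END IF
*
*  AF must be passed to PCPTTRS exactly as PCPTTRF produced it.
*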
*  =====================================================================
*
*  Arguments
*  =========
*
*  UPLO    (global input) CHARACTER
*          = 'U':  Upper triangle of A(1:N, JA:JA+N-1) is stored;
*          = 'L':  Lower triangle of A(1:N, JA:JA+N-1) is stored.
*
*  N       (global input) INTEGER
*          The number of rows and columns to be operated on, i.e. the
*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
*
*  NRHS    (global input) INTEGER
*          The number of right hand sides, i.e., the number of columns
*          of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
*          NRHS >= 0.
*
*  D       (local input/local output) REAL pointer to local
*          part of global vector storing the main diagonal of the
*          matrix.
*          On exit, this array contains information on the factors
*          of the matrix.
*          Must be of size >= DESCA( NB_ ).
*
*  E       (local input/local output) COMPLEX pointer to local
*          part of global vector storing the upper diagonal of the
*          matrix. Globally, E(N) is not referenced, and E must be
*          aligned with D.
*          On exit, this array contains information on the factors
*          of the matrix.
*          Must be of size >= DESCA( NB_ ).
*
*  JA      (global input) INTEGER
*          The index in the global array A that points to the start of
*          the matrix to be operated on (which may be either all of A
*          or a submatrix of A).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_A=501 or 502), DLEN >= 7;
*          if 2D type (DTYPE_A=1), DLEN >= 9.
*          The array descriptor for the distributed matrix A.
*          Contains information of mapping of A to memory. Please
*          see NOTES below for full description and options.
*
*  B       (local input/local output) COMPLEX pointer into
*          local memory to an array of local leading dimension
*          LLD_B >= NB.
*          On entry, this array contains the local pieces of the
*          right hand sides B(IB:IB+N-1, 1:NRHS).
*          On exit, this contains the local pieces of the solution
*          distributed matrix X.
*
*  IB      (global input) INTEGER
*          The row index in the global array B that points to the first
*          row of the matrix to be operated on (which may be either
*          all of B or a submatrix of B).
*          IMPORTANT NOTE: The current version of this code supports
*          only IB=JA.
*
*  DESCB   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_B=502), DLEN >= 7;
*          if 2D type (DTYPE_B=1), DLEN >= 9.
*          The array descriptor for the distributed matrix B.
*          Contains information of mapping of B to memory. Please
*          see NOTES below for full description and options.
*
*  AF      (local input) COMPLEX array, dimension LAF.
*          Auxiliary Fillin Space.
*          Fillin is created during the factorization routine
*          PCPTTRF and this is stored in AF. If a linear system
*          is to be solved using PCPTTRS after the factorization
*          routine, AF *must not be altered* after the factorization.
*
*  LAF     (local input) INTEGER
*          Size of user-input Auxiliary Fillin space AF. Must be >=
*          (NB+2).
*          If LAF is not large enough, an error code will be returned
*          and the minimum acceptable size will be returned in AF( 1 ).
*
*  WORK    (local workspace/local output)
*          COMPLEX temporary workspace. This space may
*          be overwritten in between calls to routines. WORK must be
*          the size given in LWORK.
*          On exit, WORK( 1 ) contains the minimal LWORK.
*
*  LWORK   (local input or global input) INTEGER
*          Size of user-input workspace WORK.
*          If LWORK is too small, the minimal acceptable size will be
*          returned in WORK(1) and an error code is returned. LWORK >=
*          (10+2*min(100,NRHS))*NPCOL+4*NRHS.
*
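*          As a worked instance of the bound above (sizes chosen
*          purely for illustration): with NRHS = 1 on a 1 x 4 process
*          grid (NPCOL = 4),
*             LWORK >= (10+2*min(100,1))*4 + 4*1 = 12*4 + 4 = 52.
*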
*  INFO    (local output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*
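*          For example, INFO = -605 decodes as i=6, j=5; in the
*          argument numbering used by this routine's checks (where
*          i=6 tags DESCA and i=9 tags DESCB), that flags entry 5
*          of DESCA.
*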
*  =====================================================================
*
*
*  Restrictions
*  ============
*
*  The following are restrictions on the input parameters. Some of these
*  are temporary and will be removed in future releases, while others
*  may reflect fundamental technical limitations.
*
*  Non-cyclic restriction: VERY IMPORTANT!
*    P*NB >= mod(JA-1,NB)+N.
*    The mapping for matrices must be blocked, reflecting the nature
*    of the divide and conquer algorithm as a task-parallel algorithm.
*    This formula in words is: no processor may have more than one
*    chunk of the matrix.
*
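*    As a numeric instance (sizes chosen only for illustration): with
*    P = 4 processes, NB = 64, and JA = 1, the restriction reads
*    4*64 = 256 >= mod(0,64)+N = N, so N may be at most 256 on this
*    grid.
*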
*  Blocksize cannot be too small:
*    If the matrix spans more than one processor, the following
*    restriction on NB, the size of each block on each processor,
*    must hold:
*            NB >= 2
*    The bulk of parallel computation is done on the matrix of size
*    O(NB) on each processor. If this is too small, divide and conquer
*    is a poor choice of algorithm.
*
*  Submatrix reference:
*    JA = IB
*    Alignment restriction that prevents unnecessary communication.
*
*
*  =====================================================================
*
*
*  Notes
*  =====
*
*  If the factorization routine and the solve routine are to be called
*  separately (to solve various sets of right-hand sides using the same
*  coefficient matrix), the auxiliary space AF *must not be altered*
*  between calls to the factorization routine and the solve routine.
*
*  The best algorithm for solving banded and tridiagonal linear systems
*  depends on a variety of parameters, especially the bandwidth.
*  Currently, only algorithms designed for the case N/P >> bw are
*  implemented. These go by many names, including Divide and Conquer,
*  Partitioning, domain decomposition-type, etc.
*  For tridiagonal matrices, it is obvious: N/P >> bw(=1), and so D&C
*  algorithms are the appropriate choice.
*
*  Algorithm description: Divide and Conquer
*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
*    banded compared with the number of equations. In this situation,
*    it is best to distribute the input matrix A one-dimensionally,
*    with columns atomic and rows divided amongst the processes.
*    The basic algorithm divides the tridiagonal matrix up into
*    P pieces with one stored on each processor,
*    and then proceeds in 2 phases for the factorization or 3 for the
*    solution of a linear system.
*      1) Local Phase:
*         The individual pieces are factored independently and in
*         parallel. These factors are applied to the matrix creating
*         fillin, which is stored in a non-inspectable way in auxiliary
*         space AF. Mathematically, this is equivalent to reordering
*         the matrix A as P A P^T and then factoring the principal
*         leading submatrix of size equal to the sum of the sizes of
*         the matrices factored on each processor. The factors of
*         these submatrices overwrite the corresponding parts of A
*         in memory.
*      2) Reduced System Phase:
*         A small ((P-1) x (P-1)) system is formed representing
*         interaction of the larger blocks, and is stored (as are its
*         factors) in the space AF. A parallel Block Cyclic Reduction
*         algorithm is used. For a linear system, a parallel front solve
*         followed by an analogous backsolve, both using the structure
*         of the factored matrix, are performed.
*      3) Backsubstitution Phase:
*         For a linear system, a local backsubstitution is performed on
*         each processor in parallel.
*
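*    In this routine, those solve phases are realized in the
*    executable code below as: a frontsolve call to PCPTTRSV, a
*    diagonal scaling B <- D^{-1} * B applied with CSCAL (to both the
*    main partition and the reduced-system entry), and a backsolve
*    call to PCPTTRSV.
*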
*
*  Descriptors
*  ===========
*
*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
*
*  Note: tridiagonal codes can use either the old two dimensional
*    or new one-dimensional descriptors, though the processor grid in
*    both cases *must be one-dimensional*. We describe both types below.
*
*  Each global data object is described by an associated description
*  vector. This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array. Such a global array has an associated description vector
*  DESCA. In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN       EXPLANATION
*  --------------- --------------- -------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type.  In this case,
*                                  DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ )  The BLACS context handle, indicating
*                                  the BLACS process grid A is distribu-
*                                  ted over. The context itself is glo-
*                                  bal, but the handle (the integer
*                                  value) may vary.
*  M_A    (global) DESCA( M_ )     The number of rows in the global
*                                  array A.
*  N_A    (global) DESCA( N_ )     The number of columns in the global
*                                  array A.
*  MB_A   (global) DESCA( MB_ )    The blocking factor used to
*                                  distribute the rows of the array.
*  NB_A   (global) DESCA( NB_ )    The blocking factor used to
*                                  distribute the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ )  The process row over which the first
*                                  row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ )  The process column over which the
*                                  first column of the array A is
*                                  distributed.
*  LLD_A  (local)  DESCA( LLD_ )   The leading dimension of the local
*                                  array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
*
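*  As a concrete instance (values chosen only for illustration): for
*  N = 1000 columns distributed with NB_A = 64 over NPCOL = 4 process
*  columns starting at CSRC_A = 0, process column MYCOL = 1 receives
*          LOCc( 1000 ) = NUMROC( 1000, 64, 1, 0, 4 ) = 256
*  local columns, consistent with the upper bound
*          ceil( ceil(1000/64)/4 )*64 = 4*64 = 256.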
*
*  One-dimensional descriptors:
*
*  One-dimensional descriptors are a new addition to ScaLAPACK since
*  version 1.0. They simplify and shorten the descriptor for 1D
*  arrays.
*
*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
*  object, we allow 1D arrays to be distributed either over the
*  first dimension of the array (as if the grid were P-by-1) or the
*  2nd dimension (as if the grid were 1-by-P). This choice is
*  indicated by the descriptor type (501 or 502)
*  as described below.
*  However, for tridiagonal matrices, since the objects being
*  distributed are the individual vectors storing the diagonals, we
*  have adopted the convention that both the P-by-1 descriptor and
*  the 1-by-P descriptor are allowed and are equivalent for
*  tridiagonal matrices. Thus, for tridiagonal matrices,
*  DTYPE_A = 501 or 502 can be used interchangeably
*  without any other change.
*  We require that the distributed vectors storing the diagonals of a
*  tridiagonal matrix be aligned with each other. Because of this, a
*  single descriptor, DESCA, serves to describe the distribution
*  of all diagonals simultaneously.
*
*  IMPORTANT NOTE: the actual BLACS grid represented by the
*  CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
*  irrespective of which one-dimensional descriptor type
*  (501 or 502) is input.
*  This routine will interpret the grid properly either way.
*  ScaLAPACK routines *do not support intercontext operations* so that
*  the grid passed to a single ScaLAPACK routine *must be the same*
*  for all array descriptors passed to that routine.
*
*  NOTE: In all cases where 1D descriptors are used, 2D descriptors
*  may also be used, since a one-dimensional array is a special case
*  of a two-dimensional array with one dimension of size unity.
*  The two-dimensional array used in this case *must* be of the
*  proper orientation:
*    If the appropriate one-dimensional descriptor is DTYPE_A=501
*    (1 by P type), then the two dimensional descriptor must
*    have a CTXT value that refers to a 1 by P BLACS grid;
*    If the appropriate one-dimensional descriptor is DTYPE_A=502
*    (P by 1 type), then the two dimensional descriptor must
*    have a CTXT value that refers to a P by 1 BLACS grid.
*
*
*  Summary of allowed descriptors, types, and BLACS grids:
*
*  DTYPE        501         502         1         1
*  BLACS grid   1xP or Px1  1xP or Px1  1xP       Px1
*  -----------------------------------------------------
*  A            OK          OK          OK        NO
*  B            NO          OK          NO        OK
*
*  Note that a consequence of this chart is that it is not possible
*  for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
*  to opposite requirements for the orientation of the BLACS grid,
*  and as noted before, the *same* BLACS context must be used in
*  all descriptors in a single ScaLAPACK subroutine call.
*
*  Let A be a generic term for any 1D block cyclically distributed
*  array. Such a global array has an associated description vector
*  DESCA. In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN  EXPLANATION
*  --------------- ---------- ------------------------------------------
*  DTYPE_A(global) DESCA( 1 ) The descriptor type.  For 1D grids,
*                             TYPE_A = 501: 1-by-P grid.
*                             TYPE_A = 502: P-by-1 grid.
*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
*                             the BLACS process grid A is distribu-
*                             ted over. The context itself is glo-
*                             bal, but the handle (the integer
*                             value) may vary.
*  N_A    (global) DESCA( 3 ) The size of the array dimension being
*                             distributed.
*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
*                             the distributed dimension of the array.
*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
*                             first row or column of the array
*                             is distributed.
*  Ignored         DESCA( 6 ) Ignored for tridiagonal matrices.
*  Reserved        DESCA( 7 ) Reserved for future use.
*
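*  Putting the pieces together, a 1D descriptor pair for this routine
*  might be filled in as sketched below. This is illustrative only:
*  the grid handle ICTXT, order N, block size NB, and local leading
*  dimension LLDB are placeholders the caller must supply. Per the
*  chart above, DESCA may be type 501 or 502, while DESCB must be
*  type 502; DESCB( 6 ) holds LLD_B, which this routine reads.
*
*     DESCA( 1 ) = 501
*     DESCA( 2 ) = ICTXT
*     DESCA( 3 ) = N
*     DESCA( 4 ) = NB
*     DESCA( 5 ) = 0
*     DESCA( 6 ) = 0
*     DESCA( 7 ) = 0
*
*     DESCB( 1 ) = 502
*     DESCB( 2 ) = ICTXT
*     DESCB( 3 ) = N
*     DESCB( 4 ) = NB
*     DESCB( 5 ) = 0
*     DESCB( 6 ) = LLDB
*     DESCB( 7 ) = 0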
*
*
*  =====================================================================
*
*  Code Developer: Andrew J. Cleary, University of Tennessee.
*    Current address: Lawrence Livermore National Labs.
*  This version released: August, 2001.
*
*  =====================================================================
*
*     ..
*     .. Parameters ..
      REAL               ONE, ZERO
      parameter( one = 1.0e+0 )
      parameter( zero = 0.0e+0 )
      COMPLEX            CONE, CZERO
      parameter( cone = ( 1.0e+0, 0.0e+0 ) )
      parameter( czero = ( 0.0e+0, 0.0e+0 ) )
      INTEGER            INT_ONE
      parameter( int_one = 1 )
      INTEGER            DESCMULT, BIGNUM
      parameter( descmult = 100, bignum = descmult * descmult )
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
     $           ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
     $           rsrc_ = 7, csrc_ = 8, lld_ = 9 )
*     ..
*     .. Local Scalars ..
      INTEGER            CSRC, FIRST_PROC, I, ICTXT, ICTXT_NEW,
     $                   ictxt_save, idum1, idum3, ja_new, llda, lldb,
     $                   mycol, myrow, my_num_cols, nb, np, npcol,
     $                   nprow, np_save, odd_size, part_offset,
     $                   part_size, return_code, store_m_b,
     $                   store_n_a, temp, work_size_min
*     ..
*     .. Local Arrays ..
      INTEGER            DESCA_1XP( 7 ), DESCB_PX1( 7 ),
     $                   param_check( 15, 3 )
*     ..
*     .. External Subroutines ..
      EXTERNAL           blacs_gridinfo, desc_convert, globchk,
     $                   pcpttrsv, pxerbla, reshape
*     ..
*     .. External Functions ..
      LOGICAL            LSAME
      INTEGER            NUMROC
      EXTERNAL           lsame, numroc
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ichar, min, mod
*     ..
*     .. Executable Statements ..
*
*     Test the input parameters
*
      info = 0
*
*     Convert descriptor into standard form for easy access to
*     parameters, check that grid is of right shape.
*
      desca_1xp( 1 ) = 501
      descb_px1( 1 ) = 502
*
      temp = desca( dtype_ )
      IF( temp .EQ. 502 ) THEN
*        Temporarily set the descriptor type to 1xP type
         desca( dtype_ ) = 501
      ENDIF
*
      CALL desc_convert( desca, desca_1xp, return_code )
*
      desca( dtype_ ) = temp
*
      IF( return_code .NE. 0 ) THEN
         info = -( 6*100 + 2 )
      ENDIF
*
      CALL desc_convert( descb, descb_px1, return_code )
*
      IF( return_code .NE. 0 ) THEN
         info = -( 9*100 + 2 )
      ENDIF
*
448 *
449 * Consistency checks for DESCA and DESCB.
450 *
451 * Context must be the same
452  IF( desca_1xp( 2 ) .NE. descb_px1( 2 ) ) THEN
453  info = -( 9*100 + 2 )
454  ENDIF
455 *
456 * These are alignment restrictions that may or may not be removed
457 * in future releases. -Andy Cleary, April 14, 1996.
458 *
459 * Block sizes must be the same
460  IF( desca_1xp( 4 ) .NE. descb_px1( 4 ) ) THEN
461  info = -( 9*100 + 4 )
462  ENDIF
463 *
464 * Source processor must be the same
465 *
466  IF( desca_1xp( 5 ) .NE. descb_px1( 5 ) ) THEN
467  info = -( 9*100 + 5 )
468  ENDIF
469 *
470 * Get values out of descriptor for use in code.
471 *
472  ictxt = desca_1xp( 2 )
473  csrc = desca_1xp( 5 )
474  nb = desca_1xp( 4 )
475  llda = desca_1xp( 6 )
476  store_n_a = desca_1xp( 3 )
477  lldb = descb_px1( 6 )
478  store_m_b = descb_px1( 3 )
479 *
480 * Get grid parameters
481 *
482 *
483  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
484  np = nprow * npcol
485 *
486 *
487 *
488  IF( lsame( uplo, 'U' ) ) THEN
489  idum1 = ichar( 'U' )
490  ELSE IF ( lsame( uplo, 'L' ) ) THEN
491  idum1 = ichar( 'L' )
492  ELSE
493  info = -1
494  END IF
495 *
496  IF( lwork .LT. -1) THEN
497  info = -13
498  ELSE IF ( lwork .EQ. -1 ) THEN
499  idum3 = -1
500  ELSE
501  idum3 = 1
502  ENDIF
503 *
504  IF( n .LT. 0 ) THEN
505  info = -2
506  ENDIF
507 *
508  IF( n+ja-1 .GT. store_n_a ) THEN
509  info = -( 6*100 + 6 )
510  ENDIF
511 *
512  IF( n+ib-1 .GT. store_m_b ) THEN
513  info = -( 9*100 + 3 )
514  ENDIF
515 *
516  IF( lldb .LT. nb ) THEN
517  info = -( 9*100 + 6 )
518  ENDIF
519 *
520  IF( nrhs .LT. 0 ) THEN
521  info = -3
522  ENDIF
523 *
524 * Current alignment restriction
525 *
526  IF( ja .NE. ib) THEN
527  info = -5
528  ENDIF
529 *
530 * Argument checking that is specific to Divide & Conquer routine
531 *
532  IF( nprow .NE. 1 ) THEN
533  info = -( 6*100+2 )
534  ENDIF
535 *
536  IF( n .GT. np*nb-mod( ja-1, nb )) THEN
537  info = -( 2 )
538  CALL pxerbla( ictxt,
539  $ 'PCPTTRS, D&C alg.: only 1 block per proc',
540  $ -info )
541  RETURN
542  ENDIF
543 *
544  IF((ja+n-1.GT.nb) .AND. ( nb.LT.2*int_one )) THEN
545  info = -( 6*100+4 )
546  CALL pxerbla( ictxt,
547  $ 'PCPTTRS, D&C alg.: NB too small',
548  $ -info )
549  RETURN
550  ENDIF
551 *
552 *
553  work_size_min =
554  $ (10+2*min(100,nrhs))*npcol+4*nrhs
555 *
556  work( 1 ) = work_size_min
557 *
558  IF( lwork .LT. work_size_min ) THEN
559  IF( lwork .NE. -1 ) THEN
560  info = -13
561  CALL pxerbla( ictxt,
562  $ 'PCPTTRS: worksize error',
563  $ -info )
564  ENDIF
565  RETURN
566  ENDIF
567 *
568 * Pack params and positions into arrays for global consistency check
569 *
570  param_check( 15, 1 ) = descb(5)
571  param_check( 14, 1 ) = descb(4)
572  param_check( 13, 1 ) = descb(3)
573  param_check( 12, 1 ) = descb(2)
574  param_check( 11, 1 ) = descb(1)
575  param_check( 10, 1 ) = ib
576  param_check( 9, 1 ) = desca(5)
577  param_check( 8, 1 ) = desca(4)
578  param_check( 7, 1 ) = desca(3)
579  param_check( 6, 1 ) = desca(1)
580  param_check( 5, 1 ) = ja
581  param_check( 4, 1 ) = nrhs
582  param_check( 3, 1 ) = n
583  param_check( 2, 1 ) = idum3
584  param_check( 1, 1 ) = idum1
585 *
586  param_check( 15, 2 ) = 905
587  param_check( 14, 2 ) = 904
588  param_check( 13, 2 ) = 903
589  param_check( 12, 2 ) = 902
590  param_check( 11, 2 ) = 901
591  param_check( 10, 2 ) = 8
592  param_check( 9, 2 ) = 605
593  param_check( 8, 2 ) = 604
594  param_check( 7, 2 ) = 603
595  param_check( 6, 2 ) = 601
596  param_check( 5, 2 ) = 5
597  param_check( 4, 2 ) = 3
598  param_check( 3, 2 ) = 2
599  param_check( 2, 2 ) = 13
600  param_check( 1, 2 ) = 1
601 *
602 * Want to find errors with MIN( ), so if no error, set it to a big
603 * number. If there already is an error, multiply by the the
604 * descriptor multiplier.
605 *
606  IF( info.GE.0 ) THEN
607  info = bignum
608  ELSE IF( info.LT.-descmult ) THEN
609  info = -info
610  ELSE
611  info = -info * descmult
612  END IF
613 *
614 * Check consistency across processors
615 *
616  CALL globchk( ictxt, 15, param_check, 15,
617  $ param_check( 1, 3 ), info )
618 *
619 * Prepare output: set info = 0 if no error, and divide by DESCMULT
620 * if error is not in a descriptor entry.
621 *
622  IF( info.EQ.bignum ) THEN
623  info = 0
624  ELSE IF( mod( info, descmult ) .EQ. 0 ) THEN
625  info = -info / descmult
626  ELSE
627  info = -info
628  END IF
629 *
630  IF( info.LT.0 ) THEN
631  CALL pxerbla( ictxt, 'PCPTTRS', -info )
632  RETURN
633  END IF
634 *
635 * Quick return if possible
636 *
637  IF( n.EQ.0 )
638  $ RETURN
639 *
640  IF( nrhs.EQ.0 )
641  $ RETURN
642 *
643 *
644 * Adjust addressing into matrix space to properly get into
645 * the beginning part of the relevant data
646 *
647  part_offset = nb*( (ja-1)/(npcol*nb) )
648 *
649  IF ( (mycol-csrc) .LT. (ja-part_offset-1)/nb ) THEN
650  part_offset = part_offset + nb
651  ENDIF
652 *
653  IF ( mycol .LT. csrc ) THEN
654  part_offset = part_offset - nb
655  ENDIF
656 *
657 * Form a new BLACS grid (the "standard form" grid) with only procs
658 * holding part of the matrix, of size 1xNP where NP is adjusted,
659 * starting at csrc=0, with JA modified to reflect dropped procs.
660 *
661 * First processor to hold part of the matrix:
662 *
663  first_proc = mod( ( ja-1 )/nb+csrc, npcol )
*
*     Calculate the new JA while dropping off unused processors.
*
      ja_new = mod( ja-1, nb ) + 1
*
*     Save and compute new value of NP
*
      np_save = np
      np = ( ja_new+n-2 )/nb + 1
*
*     Call utility routine that forms "standard-form" grid
*
      CALL reshape( ictxt, int_one, ictxt_new, int_one,
     $              first_proc, int_one, np )
*
*     Use new context from standard grid as context.
*
      ictxt_save = ictxt
      ictxt = ictxt_new
      desca_1xp( 2 ) = ictxt_new
      descb_px1( 2 ) = ictxt_new
*
*     Get information about new grid.
*
      CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
*
*     Drop out processors that do not have part of the matrix.
*
      IF( myrow .LT. 0 ) THEN
         GOTO 1234
      ENDIF
*
695 *
696 * ********************************
697 * Values reused throughout routine
698 *
699 * User-input value of partition size
700 *
701  part_size = nb
702 *
703 * Number of columns in each processor
704 *
705  my_num_cols = numroc( n, part_size, mycol, 0, npcol )
706 *
707 * Offset in columns to beginning of main partition in each proc
708 *
709  IF ( mycol .EQ. 0 ) THEN
710  part_offset = part_offset+mod( ja_new-1, part_size )
711  my_num_cols = my_num_cols - mod(ja_new-1, part_size )
712  ENDIF
713 *
714 * Size of main (or odd) partition in each processor
715 *
716  odd_size = my_num_cols
717  IF ( mycol .LT. np-1 ) THEN
718  odd_size = odd_size - int_one
719  ENDIF
720 *
721 *
722 *
*     Begin main code
*
      info = 0
*
*     Call frontsolve routine
*
      IF( lsame( uplo, 'L' ) ) THEN
*
         CALL pcpttrsv( 'L', 'N', n, nrhs, d( part_offset+1 ),
     $                  e( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ELSE
*
         CALL pcpttrsv( 'U', 'C', n, nrhs, d( part_offset+1 ),
     $                  e( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ENDIF
*
*     Divide by the main diagonal: B <- D^{-1} B
*
*     The main partition is first
*
      DO 10 i = part_offset+1, part_offset+odd_size
         CALL cscal( nrhs, cmplx( one/d( i ) ), b( i ), lldb )
   10 CONTINUE
*
*     Reduced system is next
*
      IF( mycol .LT. npcol-1 ) THEN
         i = part_offset+odd_size+1
         CALL cscal( nrhs, cone/af( odd_size+2 ), b( i ), lldb )
      ENDIF
*
757 *
758 * Call backsolve routine
759 *
760  IF( lsame( uplo, 'L' ) ) THEN
761 *
762  CALL pcpttrsv( 'L', 'C', n, nrhs, d( part_offset+1 ),
763  $ e( part_offset+1 ), ja_new, desca_1xp, b, ib,
764  $ descb_px1, af, laf, work, lwork, info )
765 *
766  ELSE
767 *
768  CALL pcpttrsv( 'U', 'N', n, nrhs, d( part_offset+1 ),
769  $ e( part_offset+1 ), ja_new, desca_1xp, b, ib,
770  $ descb_px1, af, laf, work, lwork, info )
771 *
772  ENDIF
773  1000 CONTINUE
774 *
775 *
776 * Free BLACS space used to hold standard-form grid.
777 *
778  IF( ictxt_save .NE. ictxt_new ) THEN
779  CALL blacs_gridexit( ictxt_new )
780  ENDIF
781 *
782  1234 CONTINUE
783 *
784 * Restore saved input parameters
785 *
786  ictxt = ictxt_save
787  np = np_save
788 *
789 * Output minimum worksize
790 *
791  work( 1 ) = work_size_min
792 *
793 *
794  RETURN
795 *
796 * End of PCPTTRS
797 *
798  END