ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pdpttrf.f
1  SUBROUTINE pdpttrf( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK,
2  $ INFO )
3 *
4 * -- ScaLAPACK routine (version 1.7) --
5 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
6 * and University of California, Berkeley.
7 * April 3, 2000
8 *
9 * .. Scalar Arguments ..
10  INTEGER INFO, JA, LAF, LWORK, N
11 * ..
12 * .. Array Arguments ..
13  INTEGER DESCA( * )
14  DOUBLE PRECISION AF( * ), D( * ), E( * ), WORK( * )
15 * ..
16 *
17 *
18 * Purpose
19 * =======
20 *
21 * PDPTTRF computes a Cholesky factorization
22 * of an N-by-N real tridiagonal
23 * symmetric positive definite distributed matrix
24 * A(1:N, JA:JA+N-1).
25 * Reordering is used to increase parallelism in the factorization.
26 * This reordering results in factors that are DIFFERENT from those
27 * produced by equivalent sequential codes. These factors cannot
28 * be used directly by users; however, they can be used in
29 * subsequent calls to PDPTTRS to solve linear systems.
30 *
31 * The factorization has the form
32 *
33 * P A(1:N, JA:JA+N-1) P^T = U' D U or
34 *
35 * P A(1:N, JA:JA+N-1) P^T = L D L',
36 *
37 * where U is a tridiagonal upper triangular matrix and L is tridiagonal
38 * lower triangular, and P is a permutation matrix.
39 *
40 * =====================================================================
41 *
42 * Arguments
43 * =========
44 *
45 *
46 * N (global input) INTEGER
47 * The number of rows and columns to be operated on, i.e. the
48 * order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
49 *
 50 * D (local input/local output) DOUBLE PRECISION pointer to local
 51 * part of global vector storing the main diagonal of the
 52 * matrix.
 53 * On exit, this array contains information on the
 54 * factors of the matrix.
 55 * Must be of size >= DESCA( NB_ ).
56 *
 57 * E (local input/local output) DOUBLE PRECISION pointer to local
 58 * part of global vector storing the upper diagonal of the
 59 * matrix. Globally, E( N ) is not referenced, and E must be
 60 * aligned with D.
 61 * On exit, this array contains information on the
 62 * factors of the matrix.
 63 * Must be of size >= DESCA( NB_ ).
64 *
65 * JA (global input) INTEGER
66 * The index in the global array A that points to the start of
67 * the matrix to be operated on (which may be either all of A
68 * or a submatrix of A).
69 *
70 * DESCA (global and local input) INTEGER array of dimension DLEN.
71 * if 1D type (DTYPE_A=501 or 502), DLEN >= 7;
72 * if 2D type (DTYPE_A=1), DLEN >= 9.
73 * The array descriptor for the distributed matrix A.
 74 * Contains information on the mapping of A to memory. Please
75 * see NOTES below for full description and options.
76 *
77 * AF (local output) DOUBLE PRECISION array, dimension LAF.
78 * Auxiliary Fillin Space.
79 * Fillin is created during the factorization routine
80 * PDPTTRF and this is stored in AF. If a linear system
81 * is to be solved using PDPTTRS after the factorization
82 * routine, AF *must not be altered* after the factorization.
83 *
84 * LAF (local input) INTEGER
 85 * Size of user-input Auxiliary Fillin space AF. Must be >=
 86 * 12*NPCOL + 3*NB (the minimum value LAF_MIN enforced below).
 87 * If LAF is not large enough, an error code will be returned
 88 * and the minimum acceptable size will be returned in AF( 1 ).
89 *
90 * WORK (local workspace/local output)
91 * DOUBLE PRECISION temporary workspace. This space may
92 * be overwritten in between calls to routines. WORK must be
93 * the size given in LWORK.
94 * On exit, WORK( 1 ) contains the minimal LWORK.
95 *
96 * LWORK (local input or global input) INTEGER
97 * Size of user-input workspace WORK.
98 * If LWORK is too small, the minimal acceptable size will be
99 * returned in WORK(1) and an error code is returned. LWORK>=
100 * 8*NPCOL
101 *
102 * INFO (local output) INTEGER
103 * = 0: successful exit
 104 * < 0: If the i-th argument is an array and the j-th entry had
 105 * an illegal value, then INFO = -(i*100+j) (e.g. -502 flags
 106 * entry 2 of the 5th argument, DESCA); if the i-th argument
 107 * is a scalar and had an illegal value, then INFO = -i.
108 * > 0: If INFO = K<=NPROCS, the submatrix stored on processor
109 * INFO and factored locally was not
110 * positive definite, and
111 * the factorization was not completed.
112 * If INFO = K>NPROCS, the submatrix stored on processor
113 * INFO-NPROCS representing interactions with other
114 * processors was not
115 * positive definite,
116 * and the factorization was not completed.
117 *
118 * =====================================================================
119 *
120 *
121 * Restrictions
122 * ============
123 *
124 * The following are restrictions on the input parameters. Some of these
125 * are temporary and will be removed in future releases, while others
126 * may reflect fundamental technical limitations.
127 *
 128 * Non-cyclic restriction: VERY IMPORTANT!
 129 * P*NB >= mod(JA-1,NB)+N.
 130 * The mapping for matrices must be blocked, reflecting the nature
 131 * of the divide and conquer algorithm as a task-parallel algorithm.
 132 * This formula in words is: no processor may have more than one
 133 * chunk of the matrix (see the worked example below).
134 *
135 * Blocksize cannot be too small:
136 * If the matrix spans more than one processor, the following
137 * restriction on NB, the size of each block on each processor,
138 * must hold:
139 * NB >= 2
140 * The bulk of parallel computation is done on the matrix of size
141 * O(NB) on each processor. If this is too small, divide and conquer
142 * is a poor choice of algorithm.
143 *
144 * Submatrix reference:
145 * JA = IB
146 * Alignment restriction that prevents unnecessary communication.
147 *
148 *
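* Worked example of the non-cyclic restriction (hypothetical
* values): with N = 1000, NB = 64 and JA = 1, mod(JA-1,NB)+N = 1000,
* so P*64 >= 1000 requires P >= 16 processes. With P = 15 the matrix
* would not fit in one chunk per process and the routine returns
* INFO = -1 ("only 1 block per proc").
*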
149 * =====================================================================
150 *
151 *
152 * Notes
153 * =====
154 *
155 * If the factorization routine and the solve routine are to be called
 156 * separately (to solve various sets of right-hand sides using the same
157 * coefficient matrix), the auxiliary space AF *must not be altered*
158 * between calls to the factorization routine and the solve routine.
159 *
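* As an illustrative sketch only (the interface of PDPTTRS is
* documented in pdpttrs.f and not restated here), a typical usage
* is a workspace query followed by the factorization and one or
* more solves that reuse the untouched AF:
*
*    LWORK = -1
*    CALL PDPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK, INFO )
*    LWORK = INT( WORK( 1 ) )
*    ( (re)allocate WORK to size LWORK, then )
*    CALL PDPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK, INFO )
*    ( one or more calls to PDPTTRS, passing the same AF and LAF )
*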
160 * The best algorithm for solving banded and tridiagonal linear systems
161 * depends on a variety of parameters, especially the bandwidth.
162 * Currently, only algorithms designed for the case N/P >> bw are
163 * implemented. These go by many names, including Divide and Conquer,
164 * Partitioning, domain decomposition-type, etc.
165 * For tridiagonal matrices, it is obvious: N/P >> bw(=1), and so D&C
166 * algorithms are the appropriate choice.
167 *
168 * Algorithm description: Divide and Conquer
169 *
 170 * The Divide and Conquer algorithm assumes the matrix is narrowly
171 * banded compared with the number of equations. In this situation,
172 * it is best to distribute the input matrix A one-dimensionally,
173 * with columns atomic and rows divided amongst the processes.
174 * The basic algorithm divides the tridiagonal matrix up into
175 * P pieces with one stored on each processor,
176 * and then proceeds in 2 phases for the factorization or 3 for the
177 * solution of a linear system.
178 * 1) Local Phase:
179 * The individual pieces are factored independently and in
180 * parallel. These factors are applied to the matrix creating
181 * fillin, which is stored in a non-inspectable way in auxiliary
182 * space AF. Mathematically, this is equivalent to reordering
183 * the matrix A as P A P^T and then factoring the principal
184 * leading submatrix of size equal to the sum of the sizes of
185 * the matrices factored on each processor. The factors of
186 * these submatrices overwrite the corresponding parts of A
187 * in memory.
188 * 2) Reduced System Phase:
 189 * A small (P-1)-by-(P-1) system is formed representing the
190 * interaction of the larger blocks, and is stored (as are its
191 * factors) in the space AF. A parallel Block Cyclic Reduction
192 * algorithm is used. For a linear system, a parallel front solve
 193 * followed by an analogous backsolve, both using the structure
194 * of the factored matrix, are performed.
 195 * 3) Backsubstitution Phase:
196 * For a linear system, a local backsubstitution is performed on
197 * each processor in parallel.
198 *
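* As an illustrative sketch (not tied to any particular call): with
* P = 2 processes the symmetric reordering groups the two interior
* tridiagonal blocks first and the single separator equation last,
*
*    P A P^T = [ A_1    0    v_1 ]
*              [  0    A_2   v_2 ]
*              [ v_1'  v_2'   s  ]
*
* where v_1 and v_2 each have one nonzero entry. A_1 and A_2 are
* factored independently in the Local Phase, and the 1-by-1 reduced
* system s - v_1' inv(A_1) v_1 - v_2' inv(A_2) v_2 is formed and
* factored in the Reduced System Phase.
*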
199 *
200 * Descriptors
201 * ===========
202 *
203 * Descriptors now have *types* and differ from ScaLAPACK 1.0.
204 *
205 * Note: tridiagonal codes can use either the old two dimensional
206 * or new one-dimensional descriptors, though the processor grid in
207 * both cases *must be one-dimensional*. We describe both types below.
208 *
209 * Each global data object is described by an associated description
210 * vector. This vector stores the information required to establish
211 * the mapping between an object element and its corresponding process
212 * and memory location.
213 *
 214 * Let A be a generic term for any 2D block cyclically distributed array.
215 * Such a global array has an associated description vector DESCA.
216 * In the following comments, the character _ should be read as
217 * "of the global array".
218 *
219 * NOTATION STORED IN EXPLANATION
220 * --------------- -------------- --------------------------------------
 221 * DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type. In this case,
 222 * DTYPE_A = 1.
223 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
224 * the BLACS process grid A is distribu-
225 * ted over. The context itself is glo-
226 * bal, but the handle (the integer
227 * value) may vary.
228 * M_A (global) DESCA( M_ ) The number of rows in the global
229 * array A.
230 * N_A (global) DESCA( N_ ) The number of columns in the global
231 * array A.
232 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
233 * the rows of the array.
234 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
235 * the columns of the array.
236 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
237 * row of the array A is distributed.
238 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
239 * first column of the array A is
240 * distributed.
241 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
242 * array. LLD_A >= MAX(1,LOCr(M_A)).
243 *
244 * Let K be the number of rows or columns of a distributed matrix,
245 * and assume that its process grid has dimension p x q.
246 * LOCr( K ) denotes the number of elements of K that a process
247 * would receive if K were distributed over the p processes of its
248 * process column.
249 * Similarly, LOCc( K ) denotes the number of elements of K that a
250 * process would receive if K were distributed over the q processes of
251 * its process row.
252 * The values of LOCr() and LOCc() may be determined via a call to the
253 * ScaLAPACK tool function, NUMROC:
254 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
255 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
256 * An upper bound for these quantities may be computed by:
257 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
258 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
259 *
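* As a numerical illustration (hypothetical values): for M = 9,
* MB_A = 2, RSRC_A = 0 and NPROW = 2, NUMROC gives LOCr( 9 ) = 5 on
* process row 0 (blocks 1, 3 and 5, of sizes 2, 2 and 1) and
* LOCr( 9 ) = 4 on process row 1 (blocks 2 and 4), while the upper
* bound ceil( ceil(9/2)/2 )*2 = 6 holds for both.
*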
260 *
261 * One-dimensional descriptors:
262 *
263 * One-dimensional descriptors are a new addition to ScaLAPACK since
264 * version 1.0. They simplify and shorten the descriptor for 1D
265 * arrays.
266 *
267 * Since ScaLAPACK supports two-dimensional arrays as the fundamental
268 * object, we allow 1D arrays to be distributed either over the
269 * first dimension of the array (as if the grid were P-by-1) or the
270 * 2nd dimension (as if the grid were 1-by-P). This choice is
271 * indicated by the descriptor type (501 or 502)
272 * as described below.
273 * However, for tridiagonal matrices, since the objects being
274 * distributed are the individual vectors storing the diagonals, we
275 * have adopted the convention that both the P-by-1 descriptor and
276 * the 1-by-P descriptor are allowed and are equivalent for
277 * tridiagonal matrices. Thus, for tridiagonal matrices,
278 * DTYPE_A = 501 or 502 can be used interchangeably
279 * without any other change.
280 * We require that the distributed vectors storing the diagonals of a
281 * tridiagonal matrix be aligned with each other. Because of this, a
 282 * single descriptor, DESCA, serves to describe the distribution
 283 * of all diagonals simultaneously.
284 *
285 * IMPORTANT NOTE: the actual BLACS grid represented by the
286 * CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
287 * irrespective of which one-dimensional descriptor type
288 * (501 or 502) is input.
289 * This routine will interpret the grid properly either way.
290 * ScaLAPACK routines *do not support intercontext operations* so that
291 * the grid passed to a single ScaLAPACK routine *must be the same*
292 * for all array descriptors passed to that routine.
293 *
294 * NOTE: In all cases where 1D descriptors are used, 2D descriptors
295 * may also be used, since a one-dimensional array is a special case
296 * of a two-dimensional array with one dimension of size unity.
297 * The two-dimensional array used in this case *must* be of the
298 * proper orientation:
 299 * If the appropriate one-dimensional descriptor is DTYPE_A=501
 300 * (1 by P type), then the two-dimensional descriptor must
 301 * have a CTXT value that refers to a 1 by P BLACS grid;
 302 * If the appropriate one-dimensional descriptor is DTYPE_A=502
 303 * (P by 1 type), then the two-dimensional descriptor must
 304 * have a CTXT value that refers to a P by 1 BLACS grid.
305 *
306 *
 307 * Summary of allowed descriptors, types, and BLACS grids:
 308 * DTYPE        501         502         1         1
 309 * BLACS grid   1xP or Px1  1xP or Px1  1xP       Px1
 310 * -----------------------------------------------------
 311 * A            OK          OK          OK        NO
 312 * B            NO          OK          NO        OK
313 *
 314 * Let A be a generic term for any 1D block cyclically distributed array.
315 * Such a global array has an associated description vector DESCA.
316 * In the following comments, the character _ should be read as
317 * "of the global array".
318 *
319 * NOTATION STORED IN EXPLANATION
320 * --------------- ---------- ------------------------------------------
 321 * DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
 322 * DTYPE_A = 501: 1-by-P grid.
 323 * DTYPE_A = 502: P-by-1 grid.
324 * CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
325 * the BLACS process grid A is distribu-
326 * ted over. The context itself is glo-
327 * bal, but the handle (the integer
328 * value) may vary.
329 * N_A (global) DESCA( 3 ) The size of the array dimension being
330 * distributed.
331 * NB_A (global) DESCA( 4 ) The blocking factor used to distribute
332 * the distributed dimension of the array.
333 * SRC_A (global) DESCA( 5 ) The process row or column over which the
334 * first row or column of the array
335 * is distributed.
336 * Ignored DESCA( 6 ) Ignored for tridiagonal matrices.
337 * Reserved DESCA( 7 ) Reserved for future use.
338 *
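* As an illustrative sketch (ICTXT, N and NB are assumed to hold the
* BLACS context, the matrix order and the block size), a type-501
* descriptor for the distributed vectors D and E could be set up as:
*
*    DESCA( 1 ) = 501
*    DESCA( 2 ) = ICTXT
*    DESCA( 3 ) = N
*    DESCA( 4 ) = NB
*    DESCA( 5 ) = 0
*    DESCA( 6 ) = 0
*    DESCA( 7 ) = 0
*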
339 *
340 *
341 * =====================================================================
342 *
343 * Code Developer: Andrew J. Cleary, University of Tennessee.
344 * Current address: Lawrence Livermore National Labs.
345 *
346 * =====================================================================
347 *
348 * .. Parameters ..
349  DOUBLE PRECISION ONE
350  parameter( one = 1.0d+0 )
351  DOUBLE PRECISION ZERO
352  parameter( zero = 0.0d+0 )
353  INTEGER INT_ONE
354  parameter( int_one = 1 )
355  INTEGER DESCMULT, BIGNUM
356  parameter( descmult = 100, bignum = descmult*descmult )
357  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
358  $ lld_, mb_, m_, nb_, n_, rsrc_
359  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
360  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
361  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
362 * ..
363 * .. Local Scalars ..
364  INTEGER COMM_PROC, CSRC, FIRST_PROC, I, ICTXT,
365  $ ictxt_new, ictxt_save, idum3, int_temp, ja_new,
366  $ laf_min, level_dist, llda, mycol, myrow,
367  $ my_num_cols, nb, np, npcol, nprow, np_save,
368  $ odd_size, part_offset, part_size, return_code,
369  $ store_n_a, temp, work_size_min
370 * ..
371 * .. Local Arrays ..
372  INTEGER DESCA_1XP( 7 ), PARAM_CHECK( 7, 3 )
373 * ..
374 * .. External Subroutines ..
375  EXTERNAL blacs_gridexit, blacs_gridinfo, desc_convert,
376  $ dgerv2d, dgesd2d, dpttrf, dpttrsv, dtrrv2d,
377  $ dtrsd2d, globchk, igamx2d, igebr2d, igebs2d,
378  $ pxerbla, reshape
379 * ..
380 * .. External Functions ..
381  INTEGER NUMROC
382  EXTERNAL numroc
383 * ..
384 * .. Intrinsic Functions ..
385  INTRINSIC dble, mod
386 * ..
387 * .. Executable Statements ..
388 *
389 * Test the input parameters
390 *
391  info = 0
392 *
393 * Convert descriptor into standard form for easy access to
394 * parameters, check that grid is of right shape.
395 *
396  desca_1xp( 1 ) = 501
397 *
398  temp = desca( dtype_ )
399  IF( temp.EQ.502 ) THEN
400 * Temporarily set the descriptor type to 1xP type
401  desca( dtype_ ) = 501
402  END IF
403 *
404  CALL desc_convert( desca, desca_1xp, return_code )
405 *
406  desca( dtype_ ) = temp
407 *
408  IF( return_code.NE.0 ) THEN
409  info = -( 5*100+2 )
410  END IF
411 *
412 * Get values out of descriptor for use in code.
413 *
414  ictxt = desca_1xp( 2 )
415  csrc = desca_1xp( 5 )
416  nb = desca_1xp( 4 )
417  llda = desca_1xp( 6 )
418  store_n_a = desca_1xp( 3 )
419 *
420 * Get grid parameters
421 *
422 *
423  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
424  np = nprow*npcol
425 *
426 *
427 *
428  IF( lwork.LT.-1 ) THEN
429  info = -9
430  ELSE IF( lwork.EQ.-1 ) THEN
431  idum3 = -1
432  ELSE
433  idum3 = 1
434  END IF
435 *
436  IF( n.LT.0 ) THEN
437  info = -1
438  END IF
439 *
440  IF( n+ja-1.GT.store_n_a ) THEN
441  info = -( 5*100+6 )
442  END IF
443 *
444 * Argument checking that is specific to Divide & Conquer routine
445 *
446  IF( nprow.NE.1 ) THEN
447  info = -( 5*100+2 )
448  END IF
449 *
450  IF( n.GT.np*nb-mod( ja-1, nb ) ) THEN
451  info = -( 1 )
452  CALL pxerbla( ictxt, 'PDPTTRF, D&C alg.: only 1 block per proc'
453  $ , -info )
454  RETURN
455  END IF
456 *
457  IF( ( ja+n-1.GT.nb ) .AND. ( nb.LT.2*int_one ) ) THEN
458  info = -( 5*100+4 )
459  CALL pxerbla( ictxt, 'PDPTTRF, D&C alg.: NB too small', -info )
460  RETURN
461  END IF
462 *
463 *
464 * Check auxiliary storage size
465 *
466  laf_min = ( 12*npcol+3*nb )
467 *
468  IF( laf.LT.laf_min ) THEN
469  info = -7
470 * put minimum value of laf into AF( 1 )
471  af( 1 ) = laf_min
472  CALL pxerbla( ictxt, 'PDPTTRF: auxiliary storage error ',
473  $ -info )
474  RETURN
475  END IF
476 *
477 * Check worksize
478 *
479  work_size_min = 8*npcol
480 *
481  work( 1 ) = work_size_min
482 *
483  IF( lwork.LT.work_size_min ) THEN
484  IF( lwork.NE.-1 ) THEN
485  info = -9
486  CALL pxerbla( ictxt, 'PDPTTRF: worksize error ', -info )
487  END IF
488  RETURN
489  END IF
490 *
491 * Pack params and positions into arrays for global consistency check
492 *
493  param_check( 7, 1 ) = desca( 5 )
494  param_check( 6, 1 ) = desca( 4 )
495  param_check( 5, 1 ) = desca( 3 )
496  param_check( 4, 1 ) = desca( 1 )
497  param_check( 3, 1 ) = ja
498  param_check( 2, 1 ) = n
499  param_check( 1, 1 ) = idum3
500 *
501  param_check( 7, 2 ) = 505
502  param_check( 6, 2 ) = 504
503  param_check( 5, 2 ) = 503
504  param_check( 4, 2 ) = 501
505  param_check( 3, 2 ) = 4
506  param_check( 2, 2 ) = 1
507  param_check( 1, 2 ) = 9
508 *
509 * Want to find errors with MIN( ), so if no error, set it to a big
 510 * number. If there already is an error, multiply by the
511 * descriptor multiplier.
512 *
513  IF( info.GE.0 ) THEN
514  info = bignum
515  ELSE IF( info.LT.-descmult ) THEN
516  info = -info
517  ELSE
518  info = -info*descmult
519  END IF
520 *
521 * Check consistency across processors
522 *
523  CALL globchk( ictxt, 7, param_check, 7, param_check( 1, 3 ),
524  $ info )
525 *
526 * Prepare output: set info = 0 if no error, and divide by DESCMULT
527 * if error is not in a descriptor entry.
528 *
529  IF( info.EQ.bignum ) THEN
530  info = 0
531  ELSE IF( mod( info, descmult ).EQ.0 ) THEN
532  info = -info / descmult
533  ELSE
534  info = -info
535  END IF
536 *
537  IF( info.LT.0 ) THEN
538  CALL pxerbla( ictxt, 'PDPTTRF', -info )
539  RETURN
540  END IF
541 *
542 * Quick return if possible
543 *
544  IF( n.EQ.0 )
545  $ RETURN
546 *
547 *
548 * Adjust addressing into matrix space to properly get into
549 * the beginning part of the relevant data
550 *
551  part_offset = nb*( ( ja-1 ) / ( npcol*nb ) )
552 *
553  IF( ( mycol-csrc ).LT.( ja-part_offset-1 ) / nb ) THEN
554  part_offset = part_offset + nb
555  END IF
556 *
557  IF( mycol.LT.csrc ) THEN
558  part_offset = part_offset - nb
559  END IF
560 *
561 * Form a new BLACS grid (the "standard form" grid) with only procs
562 * holding part of the matrix, of size 1xNP where NP is adjusted,
563 * starting at csrc=0, with JA modified to reflect dropped procs.
564 *
565 * First processor to hold part of the matrix:
566 *
567  first_proc = mod( ( ja-1 ) / nb+csrc, npcol )
568 *
 569 * Calculate the new JA while dropping off unused processors.
570 *
571  ja_new = mod( ja-1, nb ) + 1
572 *
573 * Save and compute new value of NP
574 *
575  np_save = np
576  np = ( ja_new+n-2 ) / nb + 1
577 *
578 * Call utility routine that forms "standard-form" grid
579 *
580  CALL reshape( ictxt, int_one, ictxt_new, int_one, first_proc,
581  $ int_one, np )
582 *
583 * Use new context from standard grid as context.
584 *
585  ictxt_save = ictxt
586  ictxt = ictxt_new
587  desca_1xp( 2 ) = ictxt_new
588 *
589 * Get information about new grid.
590 *
591  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
592 *
593 * Drop out processors that do not have part of the matrix.
594 *
595  IF( myrow.LT.0 ) THEN
596  GO TO 90
597  END IF
598 *
599 * ********************************
600 * Values reused throughout routine
601 *
602 * User-input value of partition size
603 *
604  part_size = nb
605 *
606 * Number of columns in each processor
607 *
608  my_num_cols = numroc( n, part_size, mycol, 0, npcol )
609 *
610 * Offset in columns to beginning of main partition in each proc
611 *
612  IF( mycol.EQ.0 ) THEN
613  part_offset = part_offset + mod( ja_new-1, part_size )
614  my_num_cols = my_num_cols - mod( ja_new-1, part_size )
615  END IF
616 *
617 * Size of main (or odd) partition in each processor
618 *
619  odd_size = my_num_cols
620  IF( mycol.LT.np-1 ) THEN
621  odd_size = odd_size - int_one
622  END IF
623 *
624 *
625 * Zero out space for fillin
626 *
627  DO 10 i = 1, laf_min
628  af( i ) = zero
629  10 CONTINUE
630 *
631 * Begin main code
632 *
633 *
634 ********************************************************************
635 * PHASE 1: Local computation phase.
636 ********************************************************************
637 *
638 *
639  IF( mycol.LT.np-1 ) THEN
640 * Transfer last triangle D_i of local matrix to next processor
641 * which needs it to calculate fillin due to factorization of
642 * its main (odd) block A_i.
643 * Overlap the send with the factorization of A_i.
644 *
645  CALL dtrsd2d( ictxt, 'U', 'N', 1, 1,
646  $ e( part_offset+odd_size+1 ), llda-1, 0, mycol+1 )
647 *
648  END IF
649 *
650 *
651 * Factor main partition A_i = L_i {L_i}^T in each processor
652 * Or A_i = {U_i}^T {U_i} if E is the upper superdiagonal
653 *
654  CALL dpttrf( odd_size, d( part_offset+1 ), e( part_offset+1 ),
655  $ info )
656 *
657  IF( info.NE.0 ) THEN
658  info = mycol + 1
659  GO TO 20
660  END IF
661 *
662 *
663  IF( mycol.LT.np-1 ) THEN
664 * Apply factorization to odd-even connection block B_i
665 *
666 *
667 * Perform the triangular system solve {L_i}{{B'}_i}^T = {B_i}^T
668 * by dividing B_i by diagonal element
669 *
670  e( part_offset+odd_size ) = e( part_offset+odd_size ) /
671  $ d( part_offset+odd_size )
672 *
673 *
674 *
675 * Compute contribution to diagonal block(s) of reduced system.
676 * {C'}_i = {C_i}-{{B'}_i}{{B'}_i}^T
677 *
678  d( part_offset+odd_size+1 ) = d( part_offset+odd_size+1 ) -
679  $ d( part_offset+odd_size )*
680  $ ( e( part_offset+odd_size )*
681  $ ( e( part_offset+odd_size ) ) )
682 *
683  END IF
 684 * End of "if ( MYCOL .lt. NP-1 )..." block
685 *
686 *
687  20 CONTINUE
688 * If the processor could not locally factor, it jumps here.
689 *
690  IF( mycol.NE.0 ) THEN
691 *
692 * Receive previously transmitted matrix section, which forms
693 * the right-hand-side for the triangular solve that calculates
694 * the "spike" fillin.
695 *
696 *
697  CALL dtrrv2d( ictxt, 'U', 'N', 1, 1, af( 1 ), odd_size, 0,
698  $ mycol-1 )
699 *
700  IF( info.EQ.0 ) THEN
701 *
702 * Calculate the "spike" fillin, ${L_i} {{G}_i}^T = {D_i}$ .
703 *
704  CALL dpttrsv( 'N', odd_size, int_one, d( part_offset+1 ),
705  $ e( part_offset+1 ), af( 1 ), odd_size, info )
706 *
707 * Divide by D
708 *
709  DO 30 i = 1, odd_size
710  af( i ) = af( i ) / d( part_offset+i )
711  30 CONTINUE
712 *
713 *
714 * Calculate the update block for previous proc, E_i = G_i{G_i}^T
715 *
716 *
717 * Since there is no element-by-element vector multiplication in
718 * the BLAS, this loop must be hardwired in without a BLAS call
719 *
720  int_temp = odd_size*int_one + 2 + 1
721  af( int_temp ) = 0
722 *
723  DO 40 i = 1, odd_size
724  af( int_temp ) = af( int_temp ) -
725  $ d( part_offset+i )*( af( i )*
726  $ ( af( i ) ) )
727  40 CONTINUE
728 *
729 *
730 * Initiate send of E_i to previous processor to overlap
731 * with next computation.
732 *
733  CALL dgesd2d( ictxt, int_one, int_one, af( odd_size+3 ),
734  $ int_one, 0, mycol-1 )
735 *
736 *
737  IF( mycol.LT.np-1 ) THEN
738 *
739 * Calculate off-diagonal block(s) of reduced system.
740 * Note: for ease of use in solution of reduced system, store
741 * L's off-diagonal block in transpose form.
742 * {F_i}^T = {H_i}{{B'}_i}^T
743 *
744  af( odd_size+1 ) = -d( part_offset+odd_size )*
745  $ ( e( part_offset+odd_size )*
746  $ af( odd_size ) )
747 *
748 *
749  END IF
750 *
751  END IF
752 * End of "if ( MYCOL .ne. 0 )..."
753 *
754  END IF
755 * End of "if (info.eq.0) then"
756 *
757 *
758 * Check to make sure no processors have found errors
759 *
760  CALL igamx2d( ictxt, 'A', ' ', 1, 1, info, 1, info, info, -1, 0,
761  $ 0 )
762 *
763  IF( mycol.EQ.0 ) THEN
764  CALL igebs2d( ictxt, 'A', ' ', 1, 1, info, 1 )
765  ELSE
766  CALL igebr2d( ictxt, 'A', ' ', 1, 1, info, 1, 0, 0 )
767  END IF
768 *
769  IF( info.NE.0 ) THEN
770  GO TO 80
771  END IF
772 * No errors found, continue
773 *
774 *
775 ********************************************************************
776 * PHASE 2: Formation and factorization of Reduced System.
777 ********************************************************************
778 *
779 * Gather up local sections of reduced system
780 *
781 *
782 * The last processor does not participate in the factorization of
783 * the reduced system, having sent its E_i already.
784  IF( mycol.EQ.npcol-1 ) THEN
785  GO TO 70
786  END IF
787 *
788 * Initiate send of off-diag block(s) to overlap with next part.
789 * Off-diagonal block needed on neighboring processor to start
790 * algorithm.
791 *
792  IF( ( mod( mycol+1, 2 ).EQ.0 ) .AND. ( mycol.GT.0 ) ) THEN
793 *
794  CALL dgesd2d( ictxt, int_one, int_one, af( odd_size+1 ),
795  $ int_one, 0, mycol-1 )
796 *
797  END IF
798 *
799 * Copy last diagonal block into AF storage for subsequent
800 * operations.
801 *
802  af( odd_size+2 ) = dble( d( part_offset+odd_size+1 ) )
803 *
 804 * Receive contribution to the diagonal block stored on this proc.
805 *
806  IF( mycol.LT.npcol-1 ) THEN
807 *
808  CALL dgerv2d( ictxt, int_one, int_one, af( odd_size+2+1 ),
809  $ int_one, 0, mycol+1 )
810 *
811 * Add contribution to diagonal block
812 *
813  af( odd_size+2 ) = af( odd_size+2 ) + af( odd_size+3 )
814 *
815  END IF
816 *
817 *
818 * *************************************
819 * Modification Loop
820 *
821 * The distance for sending and receiving for each level starts
822 * at 1 for the first level.
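* (Illustration, assuming NPCOL = 8: processor columns 0..6 take
* part here, column 7 having already jumped to label 70. At
* LEVEL_DIST = 1, columns 1, 3 and 5 receive the contributions of
* their immediate neighbours; at LEVEL_DIST = 2, column 3 receives
* from columns 1 and 5. Doubling LEVEL_DIST below builds this
* binary reduction tree.)
*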
823  level_dist = 1
824 *
825 * Do until this proc is needed to modify other procs' equations
826 *
827  50 CONTINUE
828  IF( mod( ( mycol+1 ) / level_dist, 2 ).NE.0 )
829  $ GO TO 60
830 *
831 * Receive and add contribution to diagonal block from the left
832 *
833  IF( mycol-level_dist.GE.0 ) THEN
834  CALL dgerv2d( ictxt, int_one, int_one, work( 1 ), int_one, 0,
835  $ mycol-level_dist )
836 *
837  af( odd_size+2 ) = af( odd_size+2 ) + work( 1 )
838 *
839  END IF
840 *
841 * Receive and add contribution to diagonal block from the right
842 *
843  IF( mycol+level_dist.LT.npcol-1 ) THEN
844  CALL dgerv2d( ictxt, int_one, int_one, work( 1 ), int_one, 0,
845  $ mycol+level_dist )
846 *
847  af( odd_size+2 ) = af( odd_size+2 ) + work( 1 )
848 *
849  END IF
850 *
851  level_dist = level_dist*2
852 *
853  GO TO 50
854  60 CONTINUE
855 * [End of GOTO Loop]
856 *
857 *
858 * *********************************
859 * Calculate and use this proc's blocks to modify other procs'...
860  IF( af( odd_size+2 ).EQ.zero ) THEN
861  info = npcol + mycol
862  END IF
863 *
864 * ****************************************************************
865 * Receive offdiagonal block from processor to right.
866 * If this is the first group of processors, the receive comes
867 * from a different processor than otherwise.
868 *
869  IF( level_dist.EQ.1 ) THEN
870  comm_proc = mycol + 1
871 *
 872 * Move the block into the place where it will be expected for
 873 * later calculations.
874 *
875  af( odd_size+3 ) = af( odd_size+1 )
876 *
877  ELSE
878  comm_proc = mycol + level_dist / 2
879  END IF
880 *
881  IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
882 *
883  CALL dgerv2d( ictxt, int_one, int_one, af( odd_size+1 ),
884  $ int_one, 0, comm_proc )
885 *
886  IF( info.EQ.0 ) THEN
887 *
888 *
889 * Modify upper off_diagonal block with diagonal block
890 *
891 *
892  af( odd_size+1 ) = af( odd_size+1 ) / af( odd_size+2 )
893 *
894  END IF
895 * End of "if ( info.eq.0 ) then"
896 *
897 * Calculate contribution from this block to next diagonal block
898 *
899  work( 1 ) = -one*af( odd_size+1 )*af( odd_size+2 )*
900  $ ( af( odd_size+1 ) )
901 *
902 * Send contribution to diagonal block's owning processor.
903 *
904  CALL dgesd2d( ictxt, int_one, int_one, work( 1 ), int_one, 0,
905  $ mycol+level_dist )
906 *
907  END IF
908 * End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
909 *
910 *
911 * ****************************************************************
912 * Receive off_diagonal block from left and use to finish with this
913 * processor.
914 *
915  IF( ( mycol / level_dist.GT.0 ) .AND.
916  $ ( mycol / level_dist.LE.( npcol-1 ) / level_dist-1 ) ) THEN
917 *
918  IF( level_dist.GT.1 ) THEN
919 *
920 * Receive offdiagonal block(s) from proc level_dist/2 to the
921 * left
922 *
923  CALL dgerv2d( ictxt, int_one, int_one, af( odd_size+2+1 ),
924  $ int_one, 0, mycol-level_dist / 2 )
925 *
926  END IF
927 *
928 *
929  IF( info.EQ.0 ) THEN
930 *
931 * Use diagonal block(s) to modify this offdiagonal block
932 *
933  af( odd_size+3 ) = ( af( odd_size+3 ) ) / af( odd_size+2 )
934 *
935  END IF
936 * End of "if( info.eq.0 ) then"
937 *
938 * Use offdiag block(s) to calculate modification to diag block
939 * of processor to the left
940 *
941  work( 1 ) = -one*af( odd_size+3 )*af( odd_size+2 )*
942  $ ( af( odd_size+3 ) )
943 *
944 * Send contribution to diagonal block's owning processor.
945 *
946  CALL dgesd2d( ictxt, int_one, int_one, work( 1 ), int_one, 0,
947  $ mycol-level_dist )
948 *
949 * *******************************************************
950 *
951  IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
952 *
953 * Decide which processor offdiagonal block(s) goes to
954 *
955  IF( ( mod( mycol / ( 2*level_dist ), 2 ) ).EQ.0 ) THEN
956  comm_proc = mycol + level_dist
957  ELSE
958  comm_proc = mycol - level_dist
959  END IF
960 *
961 * Use offdiagonal blocks to calculate offdiag
962 * block to send to neighboring processor. Depending
963 * on circumstances, may need to transpose the matrix.
964 *
965  work( 1 ) = -one*af( odd_size+3 )*af( odd_size+2 )*
966  $ af( odd_size+1 )
967 *
968 * Send contribution to offdiagonal block's owning processor.
969 *
970  CALL dgesd2d( ictxt, int_one, int_one, work( 1 ), int_one,
971  $ 0, comm_proc )
972 *
973  END IF
974 *
975  END IF
976 * End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
977 *
978  70 CONTINUE
979 *
980 *
981  80 CONTINUE
982 *
983 *
984 * Free BLACS space used to hold standard-form grid.
985 *
986  IF( ictxt_save.NE.ictxt_new ) THEN
987  CALL blacs_gridexit( ictxt_new )
988  END IF
989 *
990  90 CONTINUE
991 *
992 * Restore saved input parameters
993 *
994  ictxt = ictxt_save
995  np = np_save
996 *
997 * Output minimum worksize
998 *
999  work( 1 ) = work_size_min
1000 *
1001 * Make INFO consistent across processors
1002 *
1003  CALL igamx2d( ictxt, 'A', ' ', 1, 1, info, 1, info, info, -1, 0,
1004  $ 0 )
1005 *
1006  IF( mycol.EQ.0 ) THEN
1007  CALL igebs2d( ictxt, 'A', ' ', 1, 1, info, 1 )
1008  ELSE
1009  CALL igebr2d( ictxt, 'A', ' ', 1, 1, info, 1, 0, 0 )
1010  END IF
1011 *
1012 *
1013  RETURN
1014 *
1015 * End of PDPTTRF
1016 *
1017  END