psgbtrf.f
1  SUBROUTINE psgbtrf( N, BWL, BWU, A, JA, DESCA, IPIV, AF, LAF,
2  $ WORK, LWORK, INFO )
3 *
4 * -- ScaLAPACK routine (version 2.0.2) --
5 * Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver
6 * May 1 2012
7 *
8 * .. Scalar Arguments ..
9  INTEGER BWL, BWU, INFO, JA, LAF, LWORK, N
10 * ..
11 * .. Array Arguments ..
12  INTEGER DESCA( * ), IPIV( * )
13  REAL A( * ), AF( * ), WORK( * )
14 * ..
15 *
16 * Purpose
17 * =======
18 *
19 * PSGBTRF computes an LU factorization
20 * of an N-by-N real banded
21 * distributed matrix
22 * with lower bandwidth BWL and upper bandwidth BWU: A(1:N, JA:JA+N-1).
23 * Reordering is used to increase parallelism in the factorization.
24 * This reordering results in factors that are DIFFERENT from those
25 * produced by equivalent sequential codes. These factors cannot
26 * be used directly by users; however, they can be used in
27 * subsequent calls to PSGBTRS to solve linear systems.
28 *
29 * The factorization has the form
30 *
31 * P A(1:N, JA:JA+N-1) Q = L U
32 *
33 * where U is a banded upper triangular matrix and L is banded
34 * lower triangular, and P and Q are permutation matrices.
35 * The matrix Q represents reordering of columns
36 * for parallelism's sake, while P represents
37 * reordering of rows for numerical stability using
38 * classic partial pivoting.
39 *
40 * =====================================================================
41 *
42 * Arguments
43 * =========
44 *
45 *
46 * N (global input) INTEGER
47 * The number of rows and columns to be operated on, i.e. the
48 * order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
49 *
50 * BWL (global input) INTEGER
51 * Number of subdiagonals. 0 <= BWL <= N-1
52 *
53 * BWU (global input) INTEGER
54 * Number of superdiagonals. 0 <= BWU <= N-1
55 *
56 * A (local input/local output) REAL pointer into
57 * local memory to an array with first dimension
58 * LLD_A >=(2*bwl+2*bwu+1) (stored in DESCA).
59 * On entry, this array contains the local pieces of the
60 * N-by-N unsymmetric banded distributed matrix
61 * A(1:N, JA:JA+N-1) to be factored.
62 * This local portion is stored in the packed banded format
63 * used in LAPACK. Please see the Notes below and the
64 * ScaLAPACK manual for more detail on the format of
65 * distributed matrices.
66 * On exit, this array contains the details
67 * of the factorization.
68 * Note that permutations are performed on the matrix, so that
69 * the factors returned are different from those returned
70 * by LAPACK.
71 *
72 * JA (global input) INTEGER
73 * The index in the global array A that points to the start of
74 * the matrix to be operated on (which may be either all of A
75 * or a submatrix of A).
76 *
77 * DESCA (global and local input) INTEGER array of dimension DLEN.
78 * if 1D type (DTYPE_A=501), DLEN >= 7;
79 * if 2D type (DTYPE_A=1), DLEN >= 9.
80 * The array descriptor for the distributed matrix A.
81 * Contains information of mapping of A to memory. Please
82 * see NOTES below for full description and options.
83 *
84 * IPIV (local output) INTEGER array, dimension >= DESCA( NB ).
85 * Pivot indices for local factorizations.
86 * Users *should not* alter the contents between
87 * factorization and solve.
88 *
89 * AF (local output) REAL array, dimension LAF.
90 * Auxiliary Fillin Space.
91 * Fillin is created during the factorization routine
92 * PSGBTRF and this is stored in AF. If a linear system
93 * is to be solved using PSGBTRS after the factorization
94 * routine, AF *must not be altered* after the factorization.
95 *
96 * LAF (local input) INTEGER
97 * Size of user-input Auxiliary Fillin space AF. Must be >=
98 * (NB+bwu)*(bwl+bwu)+6*(bwl+bwu)*(bwl+2*bwu)
99 * If LAF is not large enough, an error code will be returned
100 * and the minimum acceptable size will be returned in AF( 1 )
101 *
102 * WORK (local workspace/local output)
103 * REAL temporary workspace. This space may
104 * be overwritten in between calls to routines. WORK must be
105 * the size given in LWORK.
106 * On exit, WORK( 1 ) contains the minimal LWORK.
107 *
108 * LWORK (local input or global input) INTEGER
109 * Size of user-input workspace WORK.
110 * If LWORK is too small, the minimal acceptable size will be
111 * returned in WORK(1) and an error code is returned.
112 * LWORK >= 1.
113 *
114 * INFO (global output) INTEGER
115 * = 0: successful exit
116 * < 0: If the i-th argument is an array and the j-th entry had
117 * an illegal value, then INFO = -(i*100+j); if the i-th
118 * argument is a scalar and had an illegal value, then
119 * INFO = -i.
120 * > 0: If INFO = K <= NPROCS, the submatrix stored on processor
121 * INFO and factored locally was
122 * singular, and
123 * the factorization was not completed.
124 * If INFO = K > NPROCS, the submatrix stored on processor
125 * INFO-NPROCS representing interactions with other
126 * processors was
127 * singular,
128 * and the factorization was not completed.
129 *
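*     Example (illustration only, not part of the original routine):
*     the minimum sizes implied by the argument descriptions above can
*     be computed in the calling program as sketched below. The
*     variable names are hypothetical; only the formulas come from
*     this documentation.
*
*        INTEGER            LAF_MIN, LWORK_MIN, LLDA_MIN
*        LAF_MIN   = ( NB+BWU )*( BWL+BWU ) + 6*( BWL+BWU )*( BWL+2*BWU )
*        LWORK_MIN = 1
*        LLDA_MIN  = 2*BWL + 2*BWU + 1
*
*     AF should then have at least LAF_MIN elements, WORK at least
*     LWORK_MIN, and the local array A a leading dimension
*     LLD_A >= LLDA_MIN, as required by DESCA.
*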
130 * =====================================================================
131 *
132 *
133 * Restrictions
134 * ============
135 *
136 * The following are restrictions on the input parameters. Some of these
137 * are temporary and will be removed in future releases, while others
138 * may reflect fundamental technical limitations.
139 *
140 * Non-cyclic restriction: VERY IMPORTANT!
141 * P*NB>= mod(JA-1,NB)+N.
142 * The mapping for matrices must be blocked, reflecting the nature
143 * of the divide and conquer algorithm as a task-parallel algorithm.
144 * This formula in words is: no processor may have more than one
145 * chunk of the matrix.
146 *
147 * Blocksize cannot be too small:
148 * If the matrix spans more than one processor, the following
149 * restriction on NB, the size of each block on each processor,
150 * must hold:
151 * NB >= (BWL+BWU)+1
152 * The bulk of parallel computation is done on the matrix of size
153 * O(NB) on each processor. If this is too small, divide and conquer
154 * is a poor choice of algorithm.
155 *
156 * Submatrix reference:
157 * JA = IB
158 * Alignment restriction that prevents unnecessary communication.
159 *
160 *
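*     A caller can verify these restrictions before the call. A minimal
*     sketch (hypothetical variable names; the tests mirror the checks
*     performed inside this routine, with P the number of processes in
*     the one-dimensional grid):
*
*        IF( P*NB .LT. MOD( JA-1, NB )+N ) THEN
*           WRITE(*,*) 'more than one block per process would be needed'
*        END IF
*        IF( ( JA+N-1 .GT. NB ) .AND. ( NB .LT. BWL+BWU+1 ) ) THEN
*           WRITE(*,*) 'NB too small: need NB >= BWL+BWU+1'
*        END IF
*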
161 * =====================================================================
162 *
163 *
164 * Notes
165 * =====
166 *
167 * If the factorization routine and the solve routine are to be called
168 * separately (to solve various sets of right-hand sides using the same
169 * coefficient matrix), the auxiliary space AF *must not be altered*
170 * between calls to the factorization routine and the solve routine.
171 *
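*     For example, a factor-then-solve sequence looks roughly as
*     follows (a sketch with hypothetical array names; the PSGBTRS
*     interface shown here should be checked against the PSGBTRS
*     documentation):
*
*        CALL PSGBTRF( N, BWL, BWU, A, JA, DESCA, IPIV, AF, LAF,
*       $              WORK, LWORK, INFO )
*        IF( INFO.EQ.0 ) THEN
*           CALL PSGBTRS( 'N', N, BWL, BWU, NRHS, A, JA, DESCA,
*       $                 B, IB, DESCB, IPIV, AF, LAF,
*       $                 WORK, LWORK, INFO )
*        END IF
*
*     with AF and IPIV passed unchanged between the two calls.
*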
172 * The best algorithm for solving banded and tridiagonal linear systems
173 * depends on a variety of parameters, especially the bandwidth.
174 * Currently, only algorithms designed for the case N/P >> bw are
175 * implemented. These go by many names, including Divide and Conquer,
176 * Partitioning, domain decomposition-type, etc.
177 *
178 * Algorithm description: Divide and Conquer
179 *
180 * The Divide and Conquer algorithm assumes the matrix is narrowly
181 * banded compared with the number of equations. In this situation,
182 * it is best to distribute the input matrix A one-dimensionally,
183 * with columns atomic and rows divided amongst the processes.
184 * The basic algorithm divides the banded matrix up into
185 * P pieces with one stored on each processor,
186 * and then proceeds in 2 phases for the factorization or 3 for the
187 * solution of a linear system.
188 * 1) Local Phase:
189 * The individual pieces are factored independently and in
190 * parallel. These factors are applied to the matrix creating
191 * fillin, which is stored in a non-inspectable way in auxiliary
192 * space AF. Mathematically, this is equivalent to reordering
193 * the matrix A as P A P^T and then factoring the principal
194 * leading submatrix of size equal to the sum of the sizes of
195 * the matrices factored on each processor. The factors of
196 * these submatrices overwrite the corresponding parts of A
197 * in memory.
198 * 2) Reduced System Phase:
199 * A small (max(bwl,bwu)* (P-1)) system is formed representing
200 * interaction of the larger blocks, and is stored (as are its
201 * factors) in the space AF. A parallel Block Cyclic Reduction
202 * algorithm is used. For a linear system, a parallel front solve
203 * followed by an analogous backsolve, both using the structure
204 * of the factored matrix, are performed.
205 * 3) Backsubstitution Phase:
206 * For a linear system, a local backsubstitution is performed on
207 * each processor in parallel.
208 *
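*     For example, with P = 4 processes and BWL = BWU = 2, the reduced
*     system formed in phase 2 has order max(BWL,BWU)*(P-1) = 6,
*     independent of N.
*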
209 *
210 * Descriptors
211 * ===========
212 *
213 * Descriptors now have *types* and differ from ScaLAPACK 1.0.
214 *
215 * Note: banded codes can use either the old two dimensional
216 * or new one-dimensional descriptors, though the processor grid in
217 * both cases *must be one-dimensional*. We describe both types below.
218 *
219 * Each global data object is described by an associated description
220 * vector. This vector stores the information required to establish
221 * the mapping between an object element and its corresponding process
222 * and memory location.
223 *
224 * Let A be a generic term for any 2D block cyclically distributed array.
225 * Such a global array has an associated description vector DESCA.
226 * In the following comments, the character _ should be read as
227 * "of the global array".
228 *
229 * NOTATION STORED IN EXPLANATION
230 * --------------- -------------- --------------------------------------
231 * DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type. In this case,
232 * DTYPE_A = 1.
233 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
234 * the BLACS process grid A is distribu-
235 * ted over. The context itself is glo-
236 * bal, but the handle (the integer
237 * value) may vary.
238 * M_A (global) DESCA( M_ ) The number of rows in the global
239 * array A.
240 * N_A (global) DESCA( N_ ) The number of columns in the global
241 * array A.
242 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
243 * the rows of the array.
244 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
245 * the columns of the array.
246 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
247 * row of the array A is distributed.
248 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
249 * first column of the array A is
250 * distributed.
251 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
252 * array. LLD_A >= MAX(1,LOCr(M_A)).
253 *
254 * Let K be the number of rows or columns of a distributed matrix,
255 * and assume that its process grid has dimension p x q.
256 * LOCr( K ) denotes the number of elements of K that a process
257 * would receive if K were distributed over the p processes of its
258 * process column.
259 * Similarly, LOCc( K ) denotes the number of elements of K that a
260 * process would receive if K were distributed over the q processes of
261 * its process row.
262 * The values of LOCr() and LOCc() may be determined via a call to the
263 * ScaLAPACK tool function, NUMROC:
264 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
265 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
266 * An upper bound for these quantities may be computed by:
267 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
268 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
269 *
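*     For instance, with M = 10, MB_A = 3, RSRC_A = 0 and NPROW = 2,
*     NUMROC gives LOCr( M ) = 6 on process row 0 and LOCr( M ) = 4 on
*     process row 1, both within the bound
*     ceil( ceil(10/3)/2 )*3 = 6.
*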
270 *
271 * One-dimensional descriptors:
272 *
273 * One-dimensional descriptors are a new addition to ScaLAPACK since
274 * version 1.0. They simplify and shorten the descriptor for 1D
275 * arrays.
276 *
277 * Since ScaLAPACK supports two-dimensional arrays as the fundamental
278 * object, we allow 1D arrays to be distributed either over the
279 * first dimension of the array (as if the grid were P-by-1) or the
280 * 2nd dimension (as if the grid were 1-by-P). This choice is
281 * indicated by the descriptor type (501 or 502)
282 * as described below.
283 *
284 * IMPORTANT NOTE: the actual BLACS grid represented by the
285 * CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
286 * irrespective of which one-dimensional descriptor type
287 * (501 or 502) is input.
288 * This routine will interpret the grid properly either way.
289 * ScaLAPACK routines *do not support intercontext operations* so that
290 * the grid passed to a single ScaLAPACK routine *must be the same*
291 * for all array descriptors passed to that routine.
292 *
293 * NOTE: In all cases where 1D descriptors are used, 2D descriptors
294 * may also be used, since a one-dimensional array is a special case
295 * of a two-dimensional array with one dimension of size unity.
296 * The two-dimensional array used in this case *must* be of the
297 * proper orientation:
298 * If the appropriate one-dimensional descriptor is DTYPE_A=501
299 * (1 by P type), then the two dimensional descriptor must
300 * have a CTXT value that refers to a 1 by P BLACS grid;
301 * If the appropriate one-dimensional descriptor is DTYPE_A=502
302 * (P by 1 type), then the two dimensional descriptor must
303 * have a CTXT value that refers to a P by 1 BLACS grid.
304 *
305 *
306 * Summary of allowed descriptors, types, and BLACS grids:
307 * DTYPE 501 502 1 1
308 * BLACS grid 1xP or Px1 1xP or Px1 1xP Px1
309 * -----------------------------------------------------
310 * A OK NO OK NO
311 * B NO OK NO OK
312 *
313 * Let A be a generic term for any 1D block cyclically distributed array.
314 * Such a global array has an associated description vector DESCA.
315 * In the following comments, the character _ should be read as
316 * "of the global array".
317 *
318 * NOTATION STORED IN EXPLANATION
319 * --------------- ---------- ------------------------------------------
320 * DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
321 * TYPE_A = 501: 1-by-P grid.
322 * TYPE_A = 502: P-by-1 grid.
323 * CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
324 * the BLACS process grid A is distribu-
325 * ted over. The context itself is glo-
326 * bal, but the handle (the integer
327 * value) may vary.
328 * N_A (global) DESCA( 3 ) The size of the array dimension being
329 * distributed.
330 * NB_A (global) DESCA( 4 ) The blocking factor used to distribute
331 * the distributed dimension of the array.
332 * SRC_A (global) DESCA( 5 ) The process row or column over which the
333 * first row or column of the array
334 * is distributed.
335 * LLD_A (local) DESCA( 6 ) The leading dimension of the local array
336 * storing the local blocks of the distri-
337 * buted array A. Minimum value of LLD_A
338 * depends on TYPE_A.
339 * TYPE_A = 501: LLD_A >=
340 * max( size of undistributed dimension, 1 ).
341 * TYPE_A = 502: LLD_A >= max( NB_A, 1 ).
342 * Reserved DESCA( 7 ) Reserved for future use.
343 *
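*     As an illustration (not part of the original file), a type-501
*     descriptor for the banded matrix A used by this routine could be
*     set up as follows, assuming ICTXT, N, NB and LLDA have already
*     been defined by the caller:
*
*        INTEGER            DESCA( 7 )
*        DESCA( 1 ) = 501
*        DESCA( 2 ) = ICTXT
*        DESCA( 3 ) = N
*        DESCA( 4 ) = NB
*        DESCA( 5 ) = 0
*        DESCA( 6 ) = LLDA
*        DESCA( 7 ) = 0
*
*     where LLDA >= 2*BWL + 2*BWU + 1 for this routine and DESCA( 5 )
*     is the process column holding the first column of A (0 here).
*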
344 * =====================================================================
345 *
346 * Implemented for ScaLAPACK by:
347 * Andrew J. Cleary, Livermore National Lab and University of Tenn.,
348 * and Markus Hegland, Australian National University. Feb., 1997.
349 * Based on code written by : Peter Arbenz, ETH Zurich, 1996.
350 * Last modified by: Peter Arbenz, Institute of Scientific Computing,
351 * ETH, Zurich.
352 *
353 * =====================================================================
354 *
355 * .. Parameters ..
356  REAL ONE
357  parameter( one = 1.0e+0 )
358  REAL ZERO
359  parameter( zero = 0.0e+0 )
360  INTEGER INT_ONE
361  parameter( int_one = 1 )
362  INTEGER DESCMULT, BIGNUM
363  parameter( descmult = 100, bignum = descmult*descmult )
364  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
365  $ lld_, mb_, m_, nb_, n_, rsrc_
366  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
367  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
368  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
369 * ..
370 * .. Local Scalars ..
371  INTEGER APTR, BBPTR, BIPTR, BM, BM1, BM2, BMN, BN, BW,
372  $ csrc, dbptr, first_proc, i, i1, i2, ictxt,
373  $ ictxt_new, ictxt_save, idum3, j, ja_new, jptr,
374  $ l, laf_min, lbwl, lbwu, ldb, ldbb, llda, lm,
375  $ lmj, ln, lnj, lptr, mycol, myrow, my_num_cols,
376  $ nb, neicol, np, npact, npcol, nprow, npstr,
377  $ np_save, nrhs, odd_n, odd_size, odptr, ofst,
378  $ part_offset, part_size, return_code, store_n_a,
379  $ work_size_min
380 * ..
381 * .. Local Arrays ..
382  INTEGER DESCA_1XP( 7 ), PARAM_CHECK( 9, 3 )
383 * ..
384 * .. External Subroutines ..
385  EXTERNAL blacs_gridexit, blacs_gridinfo, desc_convert,
386  $ sgbtrf, sgemm, sger, sgerv2d, sgesd2d, sgetrf,
387  $ slamov, slaswp, slatcpy, sswap, strrv2d,
388  $ strsd2d, strsm, globchk, igamx2d, igebr2d,
389  $ igebs2d, pxerbla, reshape
390 * ..
391 * .. External Functions ..
392  INTEGER NUMROC
393  EXTERNAL numroc
394 * ..
395 * .. Intrinsic Functions ..
396  INTRINSIC max, min, mod
397 * ..
398 * .. Executable Statements ..
399 *
400 *
401 * Test the input parameters
402 *
403  info = 0
404 *
405 * Convert descriptor into standard form for easy access to
406 * parameters, check that grid is of right shape.
407 *
408  desca_1xp( 1 ) = 501
409 *
410  CALL desc_convert( desca, desca_1xp, return_code )
411 *
412  IF( return_code.NE.0 ) THEN
413  info = -( 6*100+2 )
414  END IF
415 *
416 * Get values out of descriptor for use in code.
417 *
418  ictxt = desca_1xp( 2 )
419  csrc = desca_1xp( 5 )
420  nb = desca_1xp( 4 )
421  llda = desca_1xp( 6 )
422  store_n_a = desca_1xp( 3 )
423 *
424 * Get grid parameters
425 *
426 *
427  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
428  np = nprow*npcol
429 *
430 *
431 *
432  IF( lwork.LT.-1 ) THEN
433  info = -11
434  ELSE IF( lwork.EQ.-1 ) THEN
435  idum3 = -1
436  ELSE
437  idum3 = 1
438  END IF
439 *
440  IF( n.LT.0 ) THEN
441  info = -1
442  END IF
443 *
444  IF( n+ja-1.GT.store_n_a ) THEN
445  info = -( 6*100+6 )
446  END IF
447 *
448  IF( ( bwl.GT.n-1 ) .OR. ( bwl.LT.0 ) ) THEN
449  info = -2
450  END IF
451 *
452  IF( ( bwu.GT.n-1 ) .OR. ( bwu.LT.0 ) ) THEN
453  info = -3
454  END IF
455 *
456  IF( llda.LT.( 2*bwl+2*bwu+1 ) ) THEN
457  info = -( 6*100+6 )
458  END IF
459 *
460  IF( nb.LE.0 ) THEN
461  info = -( 6*100+4 )
462  END IF
463 *
464  bw = bwu + bwl
465 *
466 * Argument checking that is specific to Divide & Conquer routine
467 *
468  IF( nprow.NE.1 ) THEN
469  info = -( 6*100+2 )
470  END IF
471 *
472  IF( n.GT.np*nb-mod( ja-1, nb ) ) THEN
473  info = -( 1 )
474  CALL pxerbla( ictxt, 'PSGBTRF, D&C alg.: only 1 block per proc'
475  $ , -info )
476  RETURN
477  END IF
478 *
479  IF( ( ja+n-1.GT.nb ) .AND. ( nb.LT.( bwl+bwu+1 ) ) ) THEN
480  info = -( 6*100+4 )
481  CALL pxerbla( ictxt, 'PSGBTRF, D&C alg.: NB too small', -info )
482  RETURN
483  END IF
484 *
485 *
486 * Check auxiliary storage size
487 *
488  laf_min = ( nb+bwu )*( bwl+bwu ) + 6*( bwl+bwu )*( bwl+2*bwu )
489 *
490  IF( laf.LT.laf_min ) THEN
491  info = -9
492 * put minimum value of laf into AF( 1 )
493  af( 1 ) = laf_min
494  CALL pxerbla( ictxt, 'PSGBTRF: auxiliary storage error ',
495  $ -info )
496  RETURN
497  END IF
498 *
499 * Check worksize
500 *
501  work_size_min = 1
502 *
503  work( 1 ) = work_size_min
504 *
505  IF( lwork.LT.work_size_min ) THEN
506  IF( lwork.NE.-1 ) THEN
507  info = -11
508 * put minimum value of work into work( 1 )
509  work( 1 ) = work_size_min
510  CALL pxerbla( ictxt, 'PSGBTRF: worksize error ', -info )
511  END IF
512  RETURN
513  END IF
514 *
515 * Pack params and positions into arrays for global consistency check
516 *
517  param_check( 9, 1 ) = desca( 5 )
518  param_check( 8, 1 ) = desca( 4 )
519  param_check( 7, 1 ) = desca( 3 )
520  param_check( 6, 1 ) = desca( 1 )
521  param_check( 5, 1 ) = ja
522  param_check( 4, 1 ) = bwu
523  param_check( 3, 1 ) = bwl
524  param_check( 2, 1 ) = n
525  param_check( 1, 1 ) = idum3
526 *
527  param_check( 9, 2 ) = 605
528  param_check( 8, 2 ) = 604
529  param_check( 7, 2 ) = 603
530  param_check( 6, 2 ) = 601
531  param_check( 5, 2 ) = 5
532  param_check( 4, 2 ) = 3
533  param_check( 3, 2 ) = 2
534  param_check( 2, 2 ) = 1
535  param_check( 1, 2 ) = 11
536 *
537 * Want to find errors with MIN( ), so if no error, set it to a big
538 * number. If there already is an error, multiply by the
539 * descriptor multiplier.
540 *
541  IF( info.GE.0 ) THEN
542  info = bignum
543  ELSE IF( info.LT.-descmult ) THEN
544  info = -info
545  ELSE
546  info = -info*descmult
547  END IF
548 *
549 * Check consistency across processors
550 *
551  CALL globchk( ictxt, 9, param_check, 9, param_check( 1, 3 ),
552  $ info )
553 *
554 * Prepare output: set info = 0 if no error, and divide by DESCMULT
555 * if error is not in a descriptor entry.
556 *
557  IF( info.EQ.bignum ) THEN
558  info = 0
559  ELSE IF( mod( info, descmult ).EQ.0 ) THEN
560  info = -info / descmult
561  ELSE
562  info = -info
563  END IF
564 *
565  IF( info.LT.0 ) THEN
566  CALL pxerbla( ictxt, 'PSGBTRF', -info )
567  RETURN
568  END IF
569 *
570 * Quick return if possible
571 *
572  IF( n.EQ.0 )
573  $ RETURN
574 *
575 *
576 * Adjust addressing into matrix space to properly get into
577 * the beginning part of the relevant data
578 *
579  part_offset = nb*( ( ja-1 ) / ( npcol*nb ) )
580 *
581  IF( ( mycol-csrc ).LT.( ja-part_offset-1 ) / nb ) THEN
582  part_offset = part_offset + nb
583  END IF
584 *
585  IF( mycol.LT.csrc ) THEN
586  part_offset = part_offset - nb
587  END IF
588 *
589 * Form a new BLACS grid (the "standard form" grid) with only procs
590 * holding part of the matrix, of size 1xNP where NP is adjusted,
591 * starting at csrc=0, with JA modified to reflect dropped procs.
592 *
593 * First processor to hold part of the matrix:
594 *
595  first_proc = mod( ( ja-1 ) / nb+csrc, npcol )
596 *
597 * Calculate the new JA while dropping off unused processors.
598 *
599  ja_new = mod( ja-1, nb ) + 1
600 *
601 * Save and compute new value of NP
602 *
603  np_save = np
604  np = ( ja_new+n-2 ) / nb + 1
605 *
606 * Call utility routine that forms "standard-form" grid
607 *
608  CALL reshape( ictxt, int_one, ictxt_new, int_one, first_proc,
609  $ int_one, np )
610 *
611 * Use new context from standard grid as context.
612 *
613  ictxt_save = ictxt
614  ictxt = ictxt_new
615  desca_1xp( 2 ) = ictxt_new
616 *
617 * Get information about new grid.
618 *
619  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
620 *
621 * Drop out processors that do not have part of the matrix.
622 *
623  IF( myrow.LT.0 ) THEN
624  GO TO 210
625  END IF
626 *
627 * ********************************
628 * Values reused throughout routine
629 *
630 * User-input value of partition size
631 *
632  part_size = nb
633 *
634 * Number of columns in each processor
635 *
636  my_num_cols = numroc( n, part_size, mycol, 0, npcol )
637 *
638 * Offset in columns to beginning of main partition in each proc
639 *
640  IF( mycol.EQ.0 ) THEN
641  part_offset = part_offset + mod( ja_new-1, part_size )
642  my_num_cols = my_num_cols - mod( ja_new-1, part_size )
643  END IF
644 *
645 * Offset in elements
646 *
647  ofst = part_offset*llda
648 *
649 * Size of main (or odd) partition in each processor
650 *
651  odd_size = numroc( n, part_size, mycol, 0, npcol )
652 *
653 *
654 * Zero out space for fillin
655 *
656  DO 10 i = 1, laf_min
657  af( i ) = zero
658  10 CONTINUE
659 *
660  DO 30 j = 1, odd_size
661  DO 20 i = 1, bw
662  a( i+( j-1 )*llda ) = zero
663  20 CONTINUE
664  30 CONTINUE
665 *
666 * Begin main code
667 *
668 ********************************************************************
669 * PHASE 1: Local computation phase.
670 ********************************************************************
671 *
672 *
673 * Transfer triangle B_i of local matrix to next processor
674 * for fillin. Overlap the send with the factorization of A_i.
675 *
676  IF( mycol.LE.npcol-2 ) THEN
677 *
678 * The last processor does not need to send anything.
679 * BIPTR = location of triangle B_i in memory
680  biptr = ( nb-bw )*llda + 2*bw + 1
681 *
682  CALL strsd2d( ictxt, 'U', 'N',
683  $ min( bw, bwu+numroc( n, nb, mycol+1, 0,
684  $ npcol ) ), bw, a( biptr ), llda-1, 0, mycol+1 )
685 *
686  END IF
687 *
688 * Factor main partition P_i A_i = L_i U_i on each processor
689 *
690 * LBWL, LBWU: lower and upper bandwidth of local solver
691 * Note that for MYCOL > 0 one has lower triangular blocks!
692 * LM is the number of rows, which is usually NB, except for
693 * MYCOL = 0, where it is BWU less, and MYCOL = NPCOL-1, where it
694 * is NR+BWU, with NR the number of columns on the last processor.
695 * Finally, APTR is the pointer to the first element of A. As LAPACK
696 * uses a slightly different banded storage format than ScaLAPACK,
697 * the pointer has to be adjusted on processor MYCOL = 0.
698 *
699  IF( mycol.NE.0 ) THEN
700  lbwl = bw
701  lbwu = 0
702  aptr = 1
703  ELSE
704  lbwl = bwl
705  lbwu = bwu
706  aptr = 1 + bwu
707  END IF
708 *
709  IF( mycol.NE.npcol-1 ) THEN
710  lm = nb - lbwu
711  ln = nb - bw
712  ELSE IF( mycol.NE.0 ) THEN
713  lm = odd_size + bwu
714  ln = max( odd_size-bw, 0 )
715  ELSE
716  lm = n
717  ln = max( n-bw, 0 )
718  END IF
719 *
720  IF( ln.GT.0 ) THEN
721 *
722  CALL sgbtrf( lm, ln, lbwl, lbwu, a( aptr ), llda, ipiv, info )
723 *
724  IF( info.NE.0 ) THEN
725  info = info + nb*mycol
726  GO TO 80
727  END IF
728 *
729  nrhs = bw
730  ldb = llda - 1
731 *
732 * Update the last BW columns of A_i (code modified from DGBTRS)
733 *
734 * Only the eliminations of unknowns > LN-BW have an effect on
735 * the last BW columns. Loop over them...
736 *
737  DO 40 j = max( ln-bw+1, 1 ), ln
738 *
739  lmj = min( lbwl, lm-j )
740  lnj = min( bw, j+bw-ln+aptr-1 )
741 *
742  l = ipiv( j )
743 *
744  jptr = j - ( ln+1 ) + 2*bw + 1 - lbwl + ln*llda
745 *
746  IF( l.NE.j ) THEN
747 *
748 * Element (L,LN+1) is swapped with element (J,LN+1) etc
749 * Furthermore, the elements in the same row are LDB=LLDA-1 apart
750 * The complicated formulas are to cope with the banded
751 * data format:
752 *
753  lptr = l - ( ln+1 ) + 2*bw + 1 - lbwl + ln*llda
754 *
755  CALL sswap( lnj, a( lptr ), ldb, a( jptr ), ldb )
756 *
757  END IF
758 *
759 * LPTR is the pointer to the beginning of the
760 * coefficients of L
761 *
762  lptr = bw + 1 + aptr + ( j-1 )*llda
763 *
764  CALL sger( lmj, lnj, -one, a( lptr ), 1, a( jptr ), ldb,
765  $ a( jptr+1 ), ldb )
766  40 CONTINUE
767 *
768  END IF
769 *
770 * Compute spike fill-in, L_i F_i = P_i B_{i-1}
771 *
772 * Receive triangle B_{i-1} from previous processor
773 *
774  IF( mycol.GT.0 ) THEN
775  CALL strrv2d( ictxt, 'U', 'N', min( bw, lm ), bw, af( 1 ), bw,
776  $ 0, mycol-1 )
777 *
778 * Transpose transmitted upper triangular (trapezoidal) matrix
779 *
780  DO 60 i2 = 1, min( bw, lm )
781  DO 50 i1 = i2 + 1, bw
782  af( i1+( i2-1 )*bw ) = af( i2+( i1-1 )*bw )
783  af( i2+( i1-1 )*bw ) = zero
784  50 CONTINUE
785  60 CONTINUE
786 *
787 * Permutation and forward elimination (triang. solve)
788 *
789  DO 70 j = 1, ln
790 *
791  lmj = min( lbwl, lm-j )
792  l = ipiv( j )
793 *
794  IF( l.NE.j ) THEN
795  CALL sswap( bw, af( ( l-1 )*bw+1 ), 1,
796  $ af( ( j-1 )*bw+1 ), 1 )
797  END IF
798 *
799  lptr = bw + 1 + aptr + ( j-1 )*llda
800 *
801  CALL sger( nrhs, lmj, -one, af( ( j-1 )*bw+1 ), 1,
802  $ a( lptr ), 1, af( j*bw+1 ), bw )
803 *
804  70 CONTINUE
805 *
806  END IF
807 *
808  80 CONTINUE
809 *
810 ********************************************************************
811 * PHASE 2: Formation and factorization of Reduced System.
812 ********************************************************************
813 *
814 * Define the initial dimensions of the diagonal blocks
815 * The offdiagonal blocks (for MYCOL > 0) are of size BM by BW
816 *
817  IF( mycol.NE.npcol-1 ) THEN
818  bm = bw - lbwu
819  bn = bw
820  ELSE
821  bm = min( bw, odd_size ) + bwu
822  bn = min( bw, odd_size )
823  END IF
824 *
825 * Pointer to first element of block bidiagonal matrix in AF
826 * Leading dimension of block bidiagonal system
827 *
828  bbptr = ( nb+bwu )*bw + 1
829  ldbb = 2*bw + bwu
830 *
831 * Copy from A and AF into block bidiagonal matrix (tail of AF)
832 *
833 * DBPTR = Pointer to diagonal blocks in A
834  dbptr = bw + 1 + lbwu + ln*llda
835 *
836  CALL slamov( 'G', bm, bn, a( dbptr ), llda-1, af( bbptr+bw*ldbb ),
837  $ ldbb )
838 *
839 * Zero out any junk entries that were copied
840 *
841  DO 100 j = 1, bm
842  DO 90 i = j + lbwl, bm - 1
843  af( bbptr+bw*ldbb+( j-1 )*ldbb+i ) = zero
844  90 CONTINUE
845  100 CONTINUE
846 *
847  IF( mycol.NE.0 ) THEN
848 *
849 * ODPTR = Pointer to offdiagonal blocks in A
850 *
851  odptr = ( lm-bm )*bw + 1
852  CALL slatcpy( 'G', bw, bm, af( odptr ), bw,
853  $ af( bbptr+2*bw*ldbb ), ldbb )
854  END IF
855 *
856  IF( npcol.EQ.1 ) THEN
857 *
858 * In this case the loop over the levels will not be
859 * performed.
860  CALL sgetrf( n-ln, n-ln, af( bbptr+bw*ldbb ), ldbb,
861  $ ipiv( ln+1 ), info )
862 *
863  END IF
864 *
865 * Loop over levels ... only occurs if npcol > 1
866 *
867 * The two integers NPACT (nu. of active processors) and NPSTR
868 * (stride between active processors) are used to control the
869 * loop.
870 *
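* Illustration (not in the original source): with NPCOL = 8 the loop
* below visits ( NPACT, NPSTR ) = (8,1), (4,2), (2,4) and exits when
* NPACT reaches 1; at each level only the processes with
* MOD( MYCOL, NPSTR ) = 0 take part.
*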
871  npact = npcol
872  npstr = 1
873 *
874 * Begin loop over levels
875 *
876  110 CONTINUE
877  IF( npact.LE.1 )
878  $ GO TO 190
879 *
880 * Test if processor is active
881 *
882  IF( mod( mycol, npstr ).EQ.0 ) THEN
883 *
884 * Send/Receive blocks
885 *
886 *
887  IF( mod( mycol, 2*npstr ).EQ.0 ) THEN
888 *
889 * This node will potentially do more work later
890 *
891  neicol = mycol + npstr
892 *
893  IF( neicol / npstr.LT.npact-1 ) THEN
894  bmn = bw
895  ELSE IF( neicol / npstr.EQ.npact-1 ) THEN
896  odd_n = numroc( n, nb, npcol-1, 0, npcol )
897  bmn = min( bw, odd_n ) + bwu
898  ELSE
899 *
900 * Last processor skips to next level
901  GO TO 180
902  END IF
903 *
904 * BM1 = M for 1st block on proc pair, BM2 2nd block
905 *
906  bm1 = bm
907  bm2 = bmn
908 *
909  IF( neicol / npstr.LE.npact-1 ) THEN
910 *
911  CALL sgesd2d( ictxt, bm, 2*bw, af( bbptr+bw*ldbb ), ldbb,
912  $ 0, neicol )
913 *
914  CALL sgerv2d( ictxt, bmn, 2*bw, af( bbptr+bm ), ldbb, 0,
915  $ neicol )
916 *
917  IF( npact.EQ.2 ) THEN
918 *
919 * Copy diagonal block to align whole system
920 *
921  CALL slamov( 'G', bmn, bw, af( bbptr+bm ), ldbb,
922  $ af( bbptr+2*bw*ldbb+bm ), ldbb )
923  END IF
924 *
925  END IF
926 *
927  ELSE
928 *
929 * This node stops work after this stage -- an extra copy
930 * is required to make the odd and even frontal matrices
931 * look identical
932 *
933  neicol = mycol - npstr
934 *
935  IF( neicol.EQ.0 ) THEN
936  bmn = bw - bwu
937  ELSE
938  bmn = bw
939  END IF
940 *
941  bm1 = bmn
942  bm2 = bm
943 *
944  CALL sgesd2d( ictxt, bm, 2*bw, af( bbptr+bw*ldbb ), ldbb, 0,
945  $ neicol )
946 *
947  CALL slamov( 'G', bm, 2*bw, af( bbptr+bw*ldbb ), ldbb,
948  $ af( bbptr+bmn ), ldbb )
949 *
950  DO 130 j = bbptr + 2*bw*ldbb, bbptr + 3*bw*ldbb - 1, ldbb
951  DO 120 i = 0, ldbb - 1
952  af( i+j ) = zero
953  120 CONTINUE
954  130 CONTINUE
955 *
956  CALL sgerv2d( ictxt, bmn, 2*bw, af( bbptr+bw*ldbb ), ldbb,
957  $ 0, neicol )
958 *
959  IF( npact.EQ.2 ) THEN
960 *
961 * Copy diagonal block to align whole system
962 *
963  CALL slamov( 'G', bm, bw, af( bbptr+bmn ), ldbb,
964  $ af( bbptr+2*bw*ldbb+bmn ), ldbb )
965  END IF
966 *
967  END IF
968 *
969 * LU factorization with partial pivoting
970 *
971  IF( npact.NE.2 ) THEN
972 *
973  CALL sgetrf( bm+bmn, bw, af( bbptr+bw*ldbb ), ldbb,
974  $ ipiv( ln+1 ), info )
975 *
976 * Backsolve left side
977 *
978  DO 150 j = bbptr, bbptr + bw*ldbb - 1, ldbb
979  DO 140 i = 0, bm1 - 1
980  af( i+j ) = zero
981  140 CONTINUE
982  150 CONTINUE
983 *
984  CALL slaswp( bw, af( bbptr ), ldbb, 1, bw, ipiv( ln+1 ), 1 )
985 *
986  CALL strsm( 'L', 'L', 'N', 'U', bw, bw, one,
987  $ af( bbptr+bw*ldbb ), ldbb, af( bbptr ), ldbb )
988 *
989 * Use partial factors to update remainder
990 *
991  CALL sgemm( 'N', 'N', bm+bmn-bw, bw, bw, -one,
992  $ af( bbptr+bw*ldbb+bw ), ldbb, af( bbptr ), ldbb,
993  $ one, af( bbptr+bw ), ldbb )
994 *
995 * Backsolve right side
996 *
997  nrhs = bw
998 *
999  CALL slaswp( nrhs, af( bbptr+2*bw*ldbb ), ldbb, 1, bw,
1000  $ ipiv( ln+1 ), 1 )
1001 *
1002  CALL strsm( 'L', 'L', 'N', 'U', bw, nrhs, one,
1003  $ af( bbptr+bw*ldbb ), ldbb,
1004  $ af( bbptr+2*bw*ldbb ), ldbb )
1005 *
1006 * Use partial factors to update remainder
1007 *
1008  CALL sgemm( 'N', 'N', bm+bmn-bw, nrhs, bw, -one,
1009  $ af( bbptr+bw*ldbb+bw ), ldbb,
1010  $ af( bbptr+2*bw*ldbb ), ldbb, one,
1011  $ af( bbptr+2*bw*ldbb+bw ), ldbb )
1012 *
1013 *
1014 * Test if processor is active in next round
1015 *
1016  IF( mod( mycol, 2*npstr ).EQ.0 ) THEN
1017 *
1018 * Reset BM
1019 *
1020  bm = bm1 + bm2 - bw
1021 *
1022 * Local copying in the block bidiagonal area
1023 *
1024 *
1025  CALL slamov( 'G', bm, bw, af( bbptr+bw ), ldbb,
1026  $ af( bbptr+bw*ldbb ), ldbb )
1027  CALL slamov( 'G', bm, bw, af( bbptr+2*bw*ldbb+bw ), ldbb,
1028  $ af( bbptr+2*bw*ldbb ), ldbb )
1029 *
1030 * Zero out space that held original copy
1031 *
1032  DO 170 j = 0, bw - 1
1033  DO 160 i = 0, bm - 1
1034  af( bbptr+2*bw*ldbb+bw+j*ldbb+i ) = zero
1035  160 CONTINUE
1036  170 CONTINUE
1037 *
1038  END IF
1039 *
1040  ELSE
1041 *
1042 * Factor the final 2 by 2 block matrix
1043 *
1044  CALL sgetrf( bm+bmn, bm+bmn, af( bbptr+bw*ldbb ), ldbb,
1045  $ ipiv( ln+1 ), info )
1046  END IF
1047 *
1048  END IF
1049 *
1050 * Last processor in an odd-sized NPACT skips to here
1051 *
1052  180 CONTINUE
1053 *
1054  npact = ( npact+1 ) / 2
1055  npstr = npstr*2
1056  GO TO 110
1057 *
1058  190 CONTINUE
1059 * End loop over levels
1060 *
1061  200 CONTINUE
1062 * If error was found in Phase 1, processors jump here.
1063 *
1064 * Free BLACS space used to hold standard-form grid.
1065 *
1066  ictxt = ictxt_save
1067  IF( ictxt.NE.ictxt_new ) THEN
1068  CALL blacs_gridexit( ictxt_new )
1069  END IF
1070 *
1071  210 CONTINUE
1072 * If this processor did not hold part of the grid it
1073 * jumps here.
1074 *
1075 * Restore saved input parameters
1076 *
1077  ictxt = ictxt_save
1078  np = np_save
1079 *
1080 * Output worksize
1081 *
1082  work( 1 ) = work_size_min
1083 *
1084 * Make INFO consistent across processors
1085 *
1086  CALL igamx2d( ictxt, 'A', ' ', 1, 1, info, 1, info, info, -1, 0,
1087  $ 0 )
1088 *
1089  IF( mycol.EQ.0 ) THEN
1090  CALL igebs2d( ictxt, 'A', ' ', 1, 1, info, 1 )
1091  ELSE
1092  CALL igebr2d( ictxt, 'A', ' ', 1, 1, info, 1, 0, 0 )
1093  END IF
1094 *
1095 *
1096  RETURN
1097 *
1098 * End of PSGBTRF
1099 *
1100  END