ScaLAPACK 2.1: Scalable Linear Algebra PACKage
pzpbmv1.f
1  SUBROUTINE pzpbdcmv( LDBW, BW, UPLO, N, A, JA, DESCA, NRHS, B, IB,
2  $ DESCB, X, WORK, LWORK, INFO )
3 *
4 *
5 *
6 * -- ScaLAPACK routine (version 1.7) --
7 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
8 * and University of California, Berkeley.
9 * November 15, 1997
10 *
11 * .. Scalar Arguments ..
12  CHARACTER UPLO
13  INTEGER BW, IB, INFO, JA, LDBW, LWORK, N, NRHS
14 * ..
15 * .. Array Arguments ..
16  INTEGER DESCA( * ), DESCB( * )
17  COMPLEX*16 A( * ), B( * ), WORK( * ), X( * )
18 * ..
19 *
20 *
21 * Purpose
22 * =======
23 *
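* PZPBDCMV computes a distributed banded matrix-vector product:
* it multiplies the N-by-N Hermitian banded distributed matrix
* A(1:N, JA:JA+N-1), stored in the packed banded format described
* below, by the distributed vector(s) held in B and returns the
* result in X. The routine uses the same data layout and obeys the
* same restrictions as the divide-and-conquer banded routines
* described in the Notes below.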
24 *
25 * =====================================================================
26 *
27 * Arguments
28 * =========
29 *
30 * UPLO (global input) CHARACTER
31 * = 'U': Upper triangle of A(1:N, JA:JA+N-1) is stored;
32 * = 'L': Lower triangle of A(1:N, JA:JA+N-1) is stored.
33 *
34 * N (global input) INTEGER
35 * The number of rows and columns to be operated on, i.e. the
36 * order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
37 *
38 * BW (global input) INTEGER
 39 * Number of subdiagonals (UPLO = 'L') or superdiagonals
 39 * (UPLO = 'U') of the banded matrix A. 0 <= BW <= N-1.
40 *
41 * A (local input/local output) COMPLEX*16 pointer into
42 * local memory to an array with first dimension
43 * LLD_A >=(bw+1) (stored in DESCA).
 44 * On entry, this array contains the local pieces of the
 45 * distributed banded matrix, stored in the packed banded
 46 * format used in LAPACK. Please see the Notes below and
 47 * the ScaLAPACK manual for more detail on the format of
 48 * distributed matrices.
49 *
50 * JA (global input) INTEGER
51 * The index in the global array A that points to the start of
52 * the matrix to be operated on (which may be either all of A
53 * or a submatrix of A).
54 *
55 * DESCA (global and local input) INTEGER array of dimension DLEN.
56 * if 1D type (DTYPE_A=501), DLEN >= 7;
57 * if 2D type (DTYPE_A=1), DLEN >= 9 .
58 * The array descriptor for the distributed matrix A.
59 * Contains information of mapping of A to memory. Please
60 * see NOTES below for full description and options.
61 *
62 * AF (local output) COMPLEX*16 array, dimension LAF.
63 * Auxiliary Fillin Space.
64 * Fillin is created during the factorization routine
65 * PZPBTRF and this is stored in AF. If a linear system
66 * is to be solved using PZPBTRS after the factorization
67 * routine, AF *must not be altered* after the factorization.
68 *
69 * LAF (local input) INTEGER
70 * Size of user-input Auxiliary Fillin space AF. Must be >=
71 * (NB+2*bw)*bw
72 * If LAF is not large enough, an error code will be returned
73 * and the minimum acceptable size will be returned in AF( 1 )
74 *
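* NRHS (global input) INTEGER
* The number of vectors to be multiplied, i.e. the number of
* columns of the distributed vector block B. NRHS >= 0.
*
* B (local input) COMPLEX*16 pointer into local memory to an
* array containing the local pieces of the distributed
* vector(s) to be multiplied by A.
*
* IB (global input) INTEGER
* The row index in the global array B that points to the start
* of the vector(s) to be multiplied. IB must equal JA (see the
* Restrictions below).
*
* DESCB (global and local input) INTEGER array descriptor for the
* distributed array B. See DESCA above and the NOTES below.
*
* X (local output) COMPLEX*16 pointer into local memory to an
* array. On exit, X contains the local pieces of the
* distributed product of the banded matrix and B.
*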
75 * WORK (local workspace/local output)
76 * COMPLEX*16 temporary workspace. This space may
77 * be overwritten in between calls to routines. WORK must be
78 * the size given in LWORK.
79 * On exit, WORK( 1 ) contains the minimal LWORK.
80 *
81 * LWORK (local input or global input) INTEGER
82 * Size of user-input workspace WORK.
83 * If LWORK is too small, the minimal acceptable size will be
 84 * returned in WORK(1) and an error code is returned. LWORK >= (BW+2)*BW.
85 *
86 * INFO (global output) INTEGER
87 * = 0: successful exit
 88 * < 0: If the i-th argument is an array and the j-th entry
 89 * had an illegal value, then INFO = -(i*100+j); if the
 90 * i-th argument is a scalar and had an illegal value,
 91 * then INFO = -i.
92 *
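* For example, INFO = -704 decodes as -(7*100+4): entry 4 (the
* blocking factor NB_A) of the 7th argument (DESCA) had an
* illegal value.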
93 * =====================================================================
94 *
95 *
96 * Restrictions
97 * ============
98 *
99 * The following are restrictions on the input parameters. Some of these
100 * are temporary and will be removed in future releases, while others
101 * may reflect fundamental technical limitations.
102 *
103 * Non-cyclic restriction: VERY IMPORTANT!
104 * P*NB>= mod(JA-1,NB)+N.
105 * The mapping for matrices must be blocked, reflecting the nature
106 * of the divide and conquer algorithm as a task-parallel algorithm.
107 * This formula in words is: no processor may have more than one
108 * chunk of the matrix.
109 *
110 * Blocksize cannot be too small:
111 * If the matrix spans more than one processor, the following
112 * restriction on NB, the size of each block on each processor,
113 * must hold:
114 * NB >= 2*BW
115 * The bulk of parallel computation is done on the matrix of size
116 * O(NB) on each processor. If this is too small, divide and conquer
117 * is a poor choice of algorithm.
118 *
119 * Submatrix reference:
120 * JA = IB
121 * Alignment restriction that prevents unnecessary communication.
122 *
123 *
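* As an illustration only, the restrictions above could be verified
* on the caller's side roughly as follows (OK is a hypothetical
* LOGICAL, NPCOL is the number of processes in the 1D grid):
*
*    OK = ( NPCOL*NB .GE. MOD( JA-1, NB ) + N ) .AND.
*   $     ( ( JA+N-1 .LE. NB ) .OR. ( NB .GE. 2*BW ) ) .AND.
*   $     ( JA .EQ. IB )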
124 * =====================================================================
125 *
126 *
127 * Notes
128 * =====
129 *
130 * If the factorization routine and the solve routine are to be called
131 * separately (to solve various sets of righthand sides using the same
132 * coefficient matrix), the auxiliary space AF *must not be altered*
133 * between calls to the factorization routine and the solve routine.
134 *
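* A sketch of that calling sequence (consult the PZPBTRF and
* PZPBTRS headers for the authoritative argument lists):
*
*    CALL PZPBTRF( UPLO, N, BW, A, JA, DESCA, AF, LAF,
*   $              WORK, LWORK, INFO )
*    CALL PZPBTRS( UPLO, N, BW, NRHS, A, JA, DESCA, B, IB,
*   $              DESCB, AF, LAF, WORK, LWORK, INFO )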
135 * The best algorithm for solving banded and tridiagonal linear systems
136 * depends on a variety of parameters, especially the bandwidth.
137 * Currently, only algorithms designed for the case N/P >> bw are
138 * implemented. These go by many names, including Divide and Conquer,
139 * Partitioning, domain decomposition-type, etc.
140 *
141 * Algorithm description: Divide and Conquer
142 *
 143 * The Divide and Conquer algorithm assumes the matrix is narrowly
144 * banded compared with the number of equations. In this situation,
145 * it is best to distribute the input matrix A one-dimensionally,
146 * with columns atomic and rows divided amongst the processes.
147 * The basic algorithm divides the banded matrix up into
148 * P pieces with one stored on each processor,
149 * and then proceeds in 2 phases for the factorization or 3 for the
150 * solution of a linear system.
151 * 1) Local Phase:
152 * The individual pieces are factored independently and in
153 * parallel. These factors are applied to the matrix creating
154 * fillin, which is stored in a non-inspectable way in auxiliary
155 * space AF. Mathematically, this is equivalent to reordering
156 * the matrix A as P A P^T and then factoring the principal
157 * leading submatrix of size equal to the sum of the sizes of
158 * the matrices factored on each processor. The factors of
159 * these submatrices overwrite the corresponding parts of A
160 * in memory.
161 * 2) Reduced System Phase:
 162 * A small (BW*(P-1)) system is formed representing
163 * interaction of the larger blocks, and is stored (as are its
164 * factors) in the space AF. A parallel Block Cyclic Reduction
165 * algorithm is used. For a linear system, a parallel front solve
 166 * followed by an analogous backsolve, both using the structure
167 * of the factored matrix, are performed.
 168 * 3) Backsubstitution Phase:
169 * For a linear system, a local backsubstitution is performed on
170 * each processor in parallel.
171 *
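* As a purely illustrative example: with P = 4 processes, N = 1000
* and BW = 10, phase 1 factors a local band of roughly N/P = 250
* equations on each process, and the reduced system of phase 2 has
* order BW*(P-1) = 30.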
172 *
173 * Descriptors
174 * ===========
175 *
176 * Descriptors now have *types* and differ from ScaLAPACK 1.0.
177 *
178 * Note: banded codes can use either the old two dimensional
179 * or new one-dimensional descriptors, though the processor grid in
180 * both cases *must be one-dimensional*. We describe both types below.
181 *
182 * Each global data object is described by an associated description
183 * vector. This vector stores the information required to establish
184 * the mapping between an object element and its corresponding process
185 * and memory location.
186 *
 187 * Let A be a generic term for any 2D block cyclically distributed array.
188 * Such a global array has an associated description vector DESCA.
189 * In the following comments, the character _ should be read as
190 * "of the global array".
191 *
192 * NOTATION STORED IN EXPLANATION
193 * --------------- -------------- --------------------------------------
 194 * DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type. In this case,
195 * DTYPE_A = 1.
196 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
197 * the BLACS process grid A is distribu-
198 * ted over. The context itself is glo-
199 * bal, but the handle (the integer
200 * value) may vary.
201 * M_A (global) DESCA( M_ ) The number of rows in the global
202 * array A.
203 * N_A (global) DESCA( N_ ) The number of columns in the global
204 * array A.
205 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
206 * the rows of the array.
207 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
208 * the columns of the array.
209 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
210 * row of the array A is distributed.
211 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
212 * first column of the array A is
213 * distributed.
214 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
215 * array. LLD_A >= MAX(1,LOCr(M_A)).
216 *
217 * Let K be the number of rows or columns of a distributed matrix,
218 * and assume that its process grid has dimension p x q.
219 * LOCr( K ) denotes the number of elements of K that a process
220 * would receive if K were distributed over the p processes of its
221 * process column.
222 * Similarly, LOCc( K ) denotes the number of elements of K that a
223 * process would receive if K were distributed over the q processes of
224 * its process row.
225 * The values of LOCr() and LOCc() may be determined via a call to the
226 * ScaLAPACK tool function, NUMROC:
227 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
228 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
229 * An upper bound for these quantities may be computed by:
230 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
231 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
232 *
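* As a worked example of these formulas: with N = 100, NB_A = 16,
* CSRC_A = 0 and NPCOL = 4, the seven column blocks are dealt out
* cyclically, so LOCc( N ) is 32, 32, 20 and 16 on process columns
* 0, 1, 2 and 3 respectively, and the upper bound
* ceil( ceil(100/16)/4 )*16 = 32 holds on every process.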
233 *
234 * One-dimensional descriptors:
235 *
236 * One-dimensional descriptors are a new addition to ScaLAPACK since
237 * version 1.0. They simplify and shorten the descriptor for 1D
238 * arrays.
239 *
240 * Since ScaLAPACK supports two-dimensional arrays as the fundamental
241 * object, we allow 1D arrays to be distributed either over the
242 * first dimension of the array (as if the grid were P-by-1) or the
243 * 2nd dimension (as if the grid were 1-by-P). This choice is
244 * indicated by the descriptor type (501 or 502)
245 * as described below.
246 *
247 * IMPORTANT NOTE: the actual BLACS grid represented by the
248 * CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
249 * irrespective of which one-dimensional descriptor type
250 * (501 or 502) is input.
251 * This routine will interpret the grid properly either way.
252 * ScaLAPACK routines *do not support intercontext operations* so that
253 * the grid passed to a single ScaLAPACK routine *must be the same*
254 * for all array descriptors passed to that routine.
255 *
256 * NOTE: In all cases where 1D descriptors are used, 2D descriptors
257 * may also be used, since a one-dimensional array is a special case
258 * of a two-dimensional array with one dimension of size unity.
259 * The two-dimensional array used in this case *must* be of the
260 * proper orientation:
 261 * If the appropriate one-dimensional descriptor is DTYPE_A=501
262 * (1 by P type), then the two dimensional descriptor must
263 * have a CTXT value that refers to a 1 by P BLACS grid;
 264 * If the appropriate one-dimensional descriptor is DTYPE_A=502
265 * (P by 1 type), then the two dimensional descriptor must
266 * have a CTXT value that refers to a P by 1 BLACS grid.
267 *
268 *
 269 * Summary of allowed descriptors, types, and BLACS grids:
 270 *   DTYPE         501         502         1          1
 271 *   BLACS grid    1xP or Px1  1xP or Px1  1xP        Px1
 272 *   ----------------------------------------------------
 273 *   A             OK          NO          OK         NO
 274 *   B             NO          OK          NO         OK
275 *
 276 * Let A be a generic term for any 1D block cyclically distributed array.
277 * Such a global array has an associated description vector DESCA.
278 * In the following comments, the character _ should be read as
279 * "of the global array".
280 *
281 * NOTATION STORED IN EXPLANATION
282 * --------------- ---------- ------------------------------------------
283 * DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
284 * TYPE_A = 501: 1-by-P grid.
285 * TYPE_A = 502: P-by-1 grid.
286 * CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
287 * the BLACS process grid A is distribu-
288 * ted over. The context itself is glo-
289 * bal, but the handle (the integer
290 * value) may vary.
291 * N_A (global) DESCA( 3 ) The size of the array dimension being
292 * distributed.
293 * NB_A (global) DESCA( 4 ) The blocking factor used to distribute
294 * the distributed dimension of the array.
295 * SRC_A (global) DESCA( 5 ) The process row or column over which the
296 * first row or column of the array
297 * is distributed.
298 * LLD_A (local) DESCA( 6 ) The leading dimension of the local array
299 * storing the local blocks of the distri-
300 * buted array A. Minimum value of LLD_A
301 * depends on TYPE_A.
 302 * TYPE_A = 501: LLD_A >=
 303 * MAX( size of undistributed dimension, 1 ).
 304 * TYPE_A = 502: LLD_A >= MAX( NB_A, 1 ).
305 * Reserved DESCA( 7 ) Reserved for future use.
306 *
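* As an illustration only (ICTXT denotes a BLACS context handle and
* NB the chosen block size, both obtained by the caller), a
* type-501 descriptor for the banded matrix A of this routine could
* be filled in as:
*
*    DESCA( 1 ) = 501
*    DESCA( 2 ) = ICTXT
*    DESCA( 3 ) = N
*    DESCA( 4 ) = NB
*    DESCA( 5 ) = 0
*    DESCA( 6 ) = BW+1
*    DESCA( 7 ) = 0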
307 *
308 *
309 * =====================================================================
310 *
311 * Code Developer: Andrew J. Cleary, University of Tennessee.
312 * Current address: Lawrence Livermore National Labs.
313 * This version released: August, 2001.
314 *
315 * =====================================================================
316 *
317 * ..
318 * .. Parameters ..
319  DOUBLE PRECISION ONE, ZERO
320  parameter( one = 1.0d+0 )
321  parameter( zero = 0.0d+0 )
322  COMPLEX*16 CONE, CZERO
323  parameter( cone = ( 1.0d+0, 0.0d+0 ) )
324  parameter( czero = ( 0.0d+0, 0.0d+0 ) )
325  INTEGER INT_ONE
326  parameter( int_one = 1 )
327  INTEGER DESCMULT, BIGNUM
328  parameter(descmult = 100, bignum = descmult * descmult)
329  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
330  $ lld_, mb_, m_, nb_, n_, rsrc_
331  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
332  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
333  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
334 * ..
335 * .. Local Scalars ..
336  INTEGER CSRC, DL_N_M, DL_N_N, DL_P_M, DL_P_N,
337  $ first_proc, i, ictxt, ictxt_new, ictxt_save,
338  $ idum1, idum3, j, ja_new, llda, lldb, mycol,
339  $ myrow, my_num_cols, nb, np, npcol, nprow,
340  $ np_save, odd_size, ofst, part_offset,
341  $ part_size, store_m_b, store_n_a
342  INTEGER NUMROC_SIZE
343 * ..
344 * .. Local Arrays ..
345  INTEGER PARAM_CHECK( 16, 3 )
346 * ..
347 * .. External Subroutines ..
348  EXTERNAL blacs_gridinfo, pxerbla, reshape
349 * ..
350 * .. External Functions ..
351  LOGICAL LSAME
352  INTEGER NUMROC
353  EXTERNAL lsame, numroc
354 * ..
355 * .. Intrinsic Functions ..
356  INTRINSIC ichar, min, mod
357 * ..
358 * .. Executable Statements ..
359 *
360 * Test the input parameters
361 *
362  info = 0
363 *
364  ictxt = desca( ctxt_ )
365  csrc = desca( csrc_ )
366  nb = desca( nb_ )
367  llda = desca( lld_ )
368  store_n_a = desca( n_ )
369  lldb = descb( lld_ )
370  store_m_b = descb( m_ )
371 *
372 *
 373 * Get information about the process grid
374 *
375 *
376  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
377  np = nprow * npcol
378 *
379 *
380 *
381  IF( lsame( uplo, 'U' ) ) THEN
382  idum1 = ichar( 'U' )
383  ELSE IF ( lsame( uplo, 'L' ) ) THEN
384  idum1 = ichar( 'L' )
385  ELSE
386  info = -1
387  END IF
388 *
389  IF( lwork .LT. -1) THEN
390  info = -14
391  ELSE IF ( lwork .EQ. -1 ) THEN
392  idum3 = -1
393  ELSE
394  idum3 = 1
395  ENDIF
396 *
397  IF( n .LT. 0 ) THEN
398  info = -2
399  ENDIF
400 *
401  IF( n+ja-1 .GT. store_n_a ) THEN
402  info = -( 7*100 + 6 )
403  ENDIF
404 *
405  IF(( bw .GT. n-1 ) .OR.
406  $ ( bw .LT. 0 ) ) THEN
407  info = -3
408  ENDIF
409 *
410  IF( llda .LT. (bw+1) ) THEN
411  info = -( 7*100 + 6 )
412  ENDIF
413 *
414  IF( nb .LE. 0 ) THEN
415  info = -( 7*100 + 4 )
416  ENDIF
417 *
418 * Argument checking that is specific to Divide & Conquer routine
419 *
420  IF( nprow .NE. 1 ) THEN
421  info = -( 7*100+2 )
422  ENDIF
423 *
424  IF( n .GT. np*nb-mod( ja-1, nb )) THEN
425  info = -( 2 )
426  CALL pxerbla( ictxt,
427  $ 'PZPBDCMV, D&C alg.: only 1 block per proc',
428  $ -info )
429  RETURN
430  ENDIF
431 *
432  IF((ja+n-1.GT.nb) .AND. ( nb.LT.2*bw )) THEN
433  info = -( 7*100+4 )
434  CALL pxerbla( ictxt,
435  $ 'PZPBDCMV, D&C alg.: NB too small',
436  $ -info )
437  RETURN
438  ENDIF
439 *
440 *
441 * Pack params and positions into arrays for global consistency check
442 *
443  param_check( 16, 1 ) = descb(5)
444  param_check( 15, 1 ) = descb(4)
445  param_check( 14, 1 ) = descb(3)
446  param_check( 13, 1 ) = descb(2)
447  param_check( 12, 1 ) = descb(1)
448  param_check( 11, 1 ) = ib
449  param_check( 10, 1 ) = desca(5)
450  param_check( 9, 1 ) = desca(4)
451  param_check( 8, 1 ) = desca(3)
452  param_check( 7, 1 ) = desca(1)
453  param_check( 6, 1 ) = ja
454  param_check( 5, 1 ) = nrhs
455  param_check( 4, 1 ) = bw
456  param_check( 3, 1 ) = n
457  param_check( 2, 1 ) = idum3
458  param_check( 1, 1 ) = idum1
459 *
460  param_check( 16, 2 ) = 1005
461  param_check( 15, 2 ) = 1004
462  param_check( 14, 2 ) = 1003
463  param_check( 13, 2 ) = 1002
464  param_check( 12, 2 ) = 1001
465  param_check( 11, 2 ) = 9
466  param_check( 10, 2 ) = 705
467  param_check( 9, 2 ) = 704
468  param_check( 8, 2 ) = 703
469  param_check( 7, 2 ) = 701
470  param_check( 6, 2 ) = 6
471  param_check( 5, 2 ) = 4
472  param_check( 4, 2 ) = 3
473  param_check( 3, 2 ) = 2
474  param_check( 2, 2 ) = 14
475  param_check( 1, 2 ) = 1
476 *
477 * Want to find errors with MIN( ), so if no error, set it to a big
 478 * number. If there already is an error, multiply by the
479 * descriptor multiplier.
480 *
481  IF( info.GE.0 ) THEN
482  info = bignum
483  ELSE IF( info.LT.-descmult ) THEN
484  info = -info
485  ELSE
486  info = -info * descmult
487  END IF
488 *
489 * Check consistency across processors
490 *
491  CALL globchk( ictxt, 16, param_check, 16,
492  $ param_check( 1, 3 ), info )
493 *
494 * Prepare output: set info = 0 if no error, and divide by DESCMULT
495 * if error is not in a descriptor entry.
496 *
497  IF( info.EQ.bignum ) THEN
498  info = 0
499  ELSE IF( mod( info, descmult ) .EQ. 0 ) THEN
500  info = -info / descmult
501  ELSE
502  info = -info
503  END IF
504 *
505  IF( info.LT.0 ) THEN
506  CALL pxerbla( ictxt, 'PZPBDCMV', -info )
507  RETURN
508  END IF
509 *
510 * Quick return if possible
511 *
512  IF( n.EQ.0 )
513  $ RETURN
514 *
515 *
516 * Adjust addressing into matrix space to properly get into
517 * the beginning part of the relevant data
518 *
519  part_offset = nb*( (ja-1)/(npcol*nb) )
520 *
521  IF ( (mycol-csrc) .LT. (ja-part_offset-1)/nb ) THEN
522  part_offset = part_offset + nb
523  ENDIF
524 *
525  IF ( mycol .LT. csrc ) THEN
526  part_offset = part_offset - nb
527  ENDIF
528 *
529 * Form a new BLACS grid (the "standard form" grid) with only procs
530 * holding part of the matrix, of size 1xNP where NP is adjusted,
531 * starting at csrc=0, with JA modified to reflect dropped procs.
532 *
533 * First processor to hold part of the matrix:
534 *
535  first_proc = mod( ( ja-1 )/nb+csrc, npcol )
536 *
 537 * Calculate the new JA while dropping off unused processors.
538 *
539  ja_new = mod( ja-1, nb ) + 1
540 *
541 * Save and compute new value of NP
542 *
543  np_save = np
544  np = ( ja_new+n-2 )/nb + 1
545 *
546 * Call utility routine that forms "standard-form" grid
547 *
548  CALL reshape( ictxt, int_one, ictxt_new, int_one,
549  $ first_proc, int_one, np )
550 *
551 * Use new context from standard grid as context.
552 *
553  ictxt_save = ictxt
554  ictxt = ictxt_new
555 *
556 * Get information about new grid.
557 *
558  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
559 *
560 * Drop out processors that do not have part of the matrix.
561 *
562  IF( myrow .LT. 0 ) THEN
563  GOTO 1234
564  ENDIF
565 *
566 * ********************************
567 * Values reused throughout routine
568 *
569 * User-input value of partition size
570 *
571  part_size = nb
572 *
573 * Number of columns in each processor
574 *
575  my_num_cols = numroc( n, part_size, mycol, 0, npcol )
576 *
577 * Offset in columns to beginning of main partition in each proc
578 *
579  IF ( mycol .EQ. 0 ) THEN
580  part_offset = part_offset+mod( ja_new-1, part_size )
581  my_num_cols = my_num_cols - mod(ja_new-1, part_size )
582  ENDIF
583 *
584 * Offset in elements
585 *
586  ofst = part_offset*llda
587 *
588 * Size of main (or odd) partition in each processor
589 *
590  odd_size = my_num_cols
591  IF ( mycol .LT. np-1 ) THEN
592  odd_size = odd_size - bw
593  ENDIF
594 *
595 *
596 *
 597 * Zero out the solution, which is used to accumulate the answer
598 *
599  numroc_size =
600  $ numroc( n, part_size, mycol, 0, npcol)
601 *
602  DO 2279 j=1,nrhs
603  DO 4502 i=1,numroc_size
604  x( (j-1)*lldb + i ) = czero
605  4502 CONTINUE
606  2279 CONTINUE
607 *
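* Zero out the workspace used for the communicated triangles and
* vector pieces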
608  DO 5642 i=1, (bw+2)*bw
609  work( i ) = czero
610  5642 CONTINUE
611 *
612 * Begin main code
613 *
614 *
615 **************************************
616 *
617  IF ( lsame( uplo, 'L' ) ) THEN
618 *
 619 * Sizes of the extra triangles communicated between processors
620 *
621  IF( mycol .GT. 0 ) THEN
622 *
623  dl_p_m= min( bw,
624  $ numroc( n, part_size, mycol, 0, npcol ) )
625  dl_p_n= min( bw,
626  $ numroc( n, part_size, mycol-1, 0, npcol ) )
627  ENDIF
628 *
629  IF( mycol .LT. npcol-1 ) THEN
630 *
631  dl_n_m= min( bw,
632  $ numroc( n, part_size, mycol+1, 0, npcol ) )
633  dl_n_n= min( bw,
634  $ numroc( n, part_size, mycol, 0, npcol ) )
635  ENDIF
636 *
637 *
638  IF( mycol .LT. npcol-1 ) THEN
639 * ...must send triangle in upper half of matrix to right
640 *
641 * Transpose triangle in preparation for sending
642 *
643  CALL zlatcpy( 'U', bw, bw,
644  $ a( llda*( numroc_size-bw )+1+bw ),
645  $ llda-1, work( 1 ), bw )
646 *
647 * Send the triangle to neighboring processor to right
648 *
649  CALL ztrsd2d(ictxt, 'L', 'N',
650  $ bw, bw,
651  $ work( 1 ),
652  $ bw, myrow, mycol+1 )
653 *
654  ENDIF
655 *
656 * Use main partition in each processor to multiply locally
657 *
658  CALL zhbmv( 'L', numroc_size, bw, cone, a( ofst+1 ), llda,
659  $ b(part_offset+1), 1, czero, x( part_offset+1 ), 1 )
660 *
661 *
662 *
663  IF ( mycol .LT. npcol-1 ) THEN
664 *
665 * Do the multiplication of the triangle in the lower half
666 *
667  CALL zcopy( dl_n_n,
668  $ b( numroc_size-dl_n_n+1 ),
669  $ 1, work( bw*bw+1+bw-dl_n_n ), 1 )
670 *
671  CALL ztrmv( 'U', 'N', 'N', bw,
672  $ a( llda*( numroc_size-bw )+1+bw ), llda-1,
673  $ work( bw*bw+1 ), 1)
674 *
675 * Zero out extraneous elements caused by TRMV if any
676 *
677  IF( dl_n_m .GT. dl_n_n ) THEN
678  DO 10 i = dl_n_m-dl_n_n, dl_n_m
679  work( bw*bw+i ) = 0
680  10 CONTINUE
681  ENDIF
682 *
683 * Send the result to the neighbor
684 *
685  CALL zgesd2d( ictxt, bw, 1,
686  $ work( bw*bw+1 ), bw, myrow, mycol+1 )
687 *
688  ENDIF
689 *
690  IF ( mycol .GT. 0 ) THEN
691 *
692  DO 20 i=1, bw*( bw+2 )
693  work( i ) = czero
694  20 CONTINUE
695 *
696 * Do the multiplication of the triangle in the upper half
697 *
698 * Copy vector to workspace
699 *
700  CALL zcopy( dl_p_m, b( 1 ), 1,
701  $ work( bw*bw+1 ), 1)
702 *
703 * Receive the triangle prior to multiplying by it.
704 *
705  CALL ztrrv2d(ictxt, 'L', 'N',
706  $ bw, bw,
707  $ work( 1 ), bw, myrow, mycol-1 )
708 *
709  CALL ztrmv(
710  $ 'L',
711  $ 'N',
712  $ 'N', bw,
713  $ work( 1 ), bw,
714  $ work( bw*bw+1 ), 1 )
715 *
716 * Zero out extraneous results from TRMV if any
717 *
718  IF( dl_p_m .GT. dl_p_n ) THEN
719  DO 30 i=1, dl_p_m-dl_p_n
720  work( bw*bw+i ) = 0
721  30 CONTINUE
722  ENDIF
723 *
724 * Send result back
725 *
726  CALL zgesd2d( ictxt, bw, 1, work(bw*bw+1 ),
727  $ bw, myrow, mycol-1 )
728 *
729 * Receive vector result from neighboring processor
730 *
731  CALL zgerv2d( ictxt, bw, 1, work( bw*bw+1 ),
732  $ bw, myrow, mycol-1 )
733 *
734 * Do addition of received vector
735 *
736  CALL zaxpy( bw, cone,
737  $ work( bw*bw+1 ), 1,
738  $ x( 1 ), 1 )
739 *
740  ENDIF
741 *
742 *
743 *
744  IF( mycol .LT. npcol-1 ) THEN
745 *
746 * Receive returned result
747 *
748  CALL zgerv2d( ictxt, bw, 1, work( bw*bw+1 ),
749  $ bw, myrow, mycol+1 )
750 *
751 * Do addition of received vector
752 *
753  CALL zaxpy( bw, cone,
754  $ work( bw*bw+1 ), 1,
755  $ x( numroc_size-bw+1 ), 1)
756 *
757  ENDIF
758 *
759 *
760  ENDIF
761 *
762 * End of LSAME if
763 *
764 **************************************
765 *
766  IF ( lsame( uplo, 'U' ) ) THEN
767 *
 768 * Sizes of the extra triangles communicated between processors
769 *
770  IF( mycol .GT. 0 ) THEN
771 *
772  dl_p_m= min( bw,
773  $ numroc( n, part_size, mycol, 0, npcol ) )
774  dl_p_n= min( bw,
775  $ numroc( n, part_size, mycol-1, 0, npcol ) )
776  ENDIF
777 *
778  IF( mycol .LT. npcol-1 ) THEN
779 *
780  dl_n_m= min( bw,
781  $ numroc( n, part_size, mycol+1, 0, npcol ) )
782  dl_n_n= min( bw,
783  $ numroc( n, part_size, mycol, 0, npcol ) )
784  ENDIF
785 *
786 *
787  IF( mycol .GT. 0 ) THEN
788 * ...must send triangle in lower half of matrix to left
789 *
790 * Transpose triangle in preparation for sending
791 *
792  CALL zlatcpy( 'L', bw, bw, a( ofst+1 ),
793  $ llda-1, work( 1 ), bw )
794 *
795 * Send the triangle to neighboring processor to left
796 *
797  CALL ztrsd2d(ictxt, 'U', 'N',
798  $ bw, bw,
799  $ work( 1 ),
800  $ bw, myrow, mycol-1 )
801 *
802  ENDIF
803 *
804 * Use main partition in each processor to multiply locally
805 *
806  CALL zhbmv( 'U', numroc_size, bw, cone, a( ofst+1 ), llda,
807  $ b(part_offset+1), 1, czero, x( part_offset+1 ), 1 )
808 *
809 *
810 *
811  IF ( mycol .LT. npcol-1 ) THEN
812 *
813 * Do the multiplication of the triangle in the lower half
814 *
815  CALL zcopy( dl_n_n,
816  $ b( numroc_size-dl_n_n+1 ),
817  $ 1, work( bw*bw+1+bw-dl_n_n ), 1 )
818 *
819 * Receive the triangle prior to multiplying by it.
820 *
821  CALL ztrrv2d(ictxt, 'U', 'N',
822  $ bw, bw,
823  $ work( 1 ), bw, myrow, mycol+1 )
824 *
825  CALL ztrmv( 'U', 'N', 'N', bw,
826  $ work( 1 ), bw,
827  $ work( bw*bw+1 ), 1)
828 *
829 * Zero out extraneous elements caused by TRMV if any
830 *
831  IF( dl_n_m .GT. dl_n_n ) THEN
832  DO 40 i = dl_n_m-dl_n_n, dl_n_m
833  work( bw*bw+i ) = 0
834  40 CONTINUE
835  ENDIF
836 *
837 * Send the result to the neighbor
838 *
839  CALL zgesd2d( ictxt, bw, 1,
840  $ work( bw*bw+1 ), bw, myrow, mycol+1 )
841 *
842  ENDIF
843 *
844  IF ( mycol .GT. 0 ) THEN
845 *
846  DO 50 i=1, bw*( bw+2 )
847  work( i ) = czero
848  50 CONTINUE
849 *
850 * Do the multiplication of the triangle in the upper half
851 *
852 * Copy vector to workspace
853 *
854  CALL zcopy( dl_p_m, b( 1 ), 1,
855  $ work( bw*bw+1 ), 1)
856 *
857  CALL ztrmv(
858  $ 'L',
859  $ 'N',
860  $ 'N', bw,
861  $ a( 1 ), llda-1,
862  $ work( bw*bw+1 ), 1 )
863 *
864 * Zero out extraneous results from TRMV if any
865 *
866  IF( dl_p_m .GT. dl_p_n ) THEN
867  DO 60 i=1, dl_p_m-dl_p_n
868  work( bw*bw+i ) = 0
869  60 CONTINUE
870  ENDIF
871 *
872 * Send result back
873 *
874  CALL zgesd2d( ictxt, bw, 1, work(bw*bw+1 ),
875  $ bw, myrow, mycol-1 )
876 *
877 * Receive vector result from neighboring processor
878 *
879  CALL zgerv2d( ictxt, bw, 1, work( bw*bw+1 ),
880  $ bw, myrow, mycol-1 )
881 *
882 * Do addition of received vector
883 *
884  CALL zaxpy( bw, cone,
885  $ work( bw*bw+1 ), 1,
886  $ x( 1 ), 1 )
887 *
888  ENDIF
889 *
890 *
891 *
892  IF( mycol .LT. npcol-1 ) THEN
893 *
894 * Receive returned result
895 *
896  CALL zgerv2d( ictxt, bw, 1, work( bw*bw+1 ),
897  $ bw, myrow, mycol+1 )
898 *
899 * Do addition of received vector
900 *
901  CALL zaxpy( bw, cone,
902  $ work( bw*bw+1 ), 1,
903  $ x( numroc_size-bw+1 ), 1)
904 *
905  ENDIF
906 *
907 *
908  ENDIF
909 *
910 * End of LSAME if
911 *
912 *
913 * Free BLACS space used to hold standard-form grid.
914 *
915  IF( ictxt_save .NE. ictxt_new ) THEN
916  CALL blacs_gridexit( ictxt_new )
917  ENDIF
918 *
919  1234 CONTINUE
920 *
921 * Restore saved input parameters
922 *
923  ictxt = ictxt_save
924  np = np_save
925 *
926 *
927  RETURN
928 *
 929 * End of PZPBDCMV
930 *
931  END