ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pdgeqrrv.f
Go to the documentation of this file.
1  SUBROUTINE pdgeqrrv( M, N, A, IA, JA, DESCA, TAU, WORK )
2 *
3 * -- ScaLAPACK routine (version 1.7) --
4 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
5 * and University of California, Berkeley.
6 * May 28, 2001
7 *
8 * .. Scalar Arguments ..
9  INTEGER IA, JA, M, N
10 * ..
11 * .. Array Arguments ..
12  INTEGER DESCA( * )
13  DOUBLE PRECISION A( * ), TAU( * ), WORK( * )
14 * ..
15 *
16 * Purpose
17 * =======
18 *
19 * PDGEQRRV computes sub( A ) = A(IA:IA+M-1,JA:JA+N-1) from Q, R
20 * computed by PDGEQRF.
21 *
22 * Notes
23 * =====
24 *
25 * Each global data object is described by an associated description
26 * vector. This vector stores the information required to establish
27 * the mapping between an object element and its corresponding process
28 * and memory location.
29 *
30 * Let A be a generic term for any 2D block cyclicly distributed array.
31 * Such a global array has an associated description vector DESCA.
32 * In the following comments, the character _ should be read as
33 * "of the global array".
34 *
35 * NOTATION STORED IN EXPLANATION
36 * --------------- -------------- --------------------------------------
37 * DTYPE_A(global) DESCA( DTYPE_ )The descriptor type. In this case,
38 * DTYPE_A = 1.
39 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
40 * the BLACS process grid A is distribu-
41 * ted over. The context itself is glo-
42 * bal, but the handle (the integer
43 * value) may vary.
44 * M_A (global) DESCA( M_ ) The number of rows in the global
45 * array A.
46 * N_A (global) DESCA( N_ ) The number of columns in the global
47 * array A.
48 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
49 * the rows of the array.
50 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
51 * the columns of the array.
52 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
53 * row of the array A is distributed.
54 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
55 * first column of the array A is
56 * distributed.
57 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
58 * array. LLD_A >= MAX(1,LOCr(M_A)).
59 *
60 * Let K be the number of rows or columns of a distributed matrix,
61 * and assume that its process grid has dimension p x q.
62 * LOCr( K ) denotes the number of elements of K that a process
63 * would receive if K were distributed over the p processes of its
64 * process column.
65 * Similarly, LOCc( K ) denotes the number of elements of K that a
66 * process would receive if K were distributed over the q processes of
67 * its process row.
68 * The values of LOCr() and LOCc() may be determined via a call to the
69 * ScaLAPACK tool function, NUMROC:
70 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
71 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
72 * An upper bound for these quantities may be computed by:
73 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
74 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
75 *
76 * Arguments
77 * =========
78 *
79 * M (global input) INTEGER
80 * The number of rows to be operated on, i.e. the number of rows
81 * of the distributed submatrix sub( A ). M >= 0.
82 *
83 * N (global input) INTEGER
84 * The number of columns to be operated on, i.e. the number of
85 * columns of the distributed submatrix sub( A ). N >= 0.
86 *
87 * A (local input/local output) DOUBLE PRECISION pointer into the
88 * local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
89 * On entry, sub( A ) contains the the factors Q and R computed
90 * by PDGEQRF. On exit, the original matrix is restored.
91 *
92 * IA (global input) INTEGER
93 * The row index in the global array A indicating the first
94 * row of sub( A ).
95 *
96 * JA (global input) INTEGER
97 * The column index in the global array A indicating the
98 * first column of sub( A ).
99 *
100 * DESCA (global and local input) INTEGER array of dimension DLEN_.
101 * The array descriptor for the distributed matrix A.
102 *
103 * TAU (local input) DOUBLE PRECISION, array, dimension
104 * LOCc(JA+MIN(M,N)-1). This array contains the scalar factors
105 * TAU of the elementary reflectors computed by PDGEQRF. TAU
106 * is tied to the distributed matrix A.
107 *
108 * WORK (local workspace) DOUBLE PRECISION array, dimension (LWORK)
109 * LWORK = NB_A * ( 2*Mp0 + Nq0 + NB_A ), where
110 * Mp0 = NUMROC( M+IROFF, MB_A, MYROW, IAROW, NPROW ) * NB_A,
111 * Nq0 = NUMROC( N+ICOFF, NB_A, MYCOL, IACOL, NPCOL ) * MB_A,
112 * IROFF = MOD( IA-1, MB_A ), ICOFF = MOD( JA-1, NB_A ),
113 * IAROW = INDXG2P( IA, DESCA( MB_ ), MYROW, DESCA( RSRC_ ),
114 * NPROW ),
115 * IACOL = INDXG2P( JA, DESCA( NB_ ), MYCOL, DESCA( CSRC_ ),
116 * NPCOL ),
117 * and NUMROC, INDXG2P are ScaLAPACK tool functions;
118 * MYROW, MYCOL, NPROW and NPCOL can be determined by calling
119 * the subroutine BLACS_GRIDINFO.
120 *
121 * =====================================================================
122 *
123 * .. Parameters ..
124  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
125  $ LLD_, MB_, M_, NB_, N_, RSRC_
126  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
127  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
128  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
129  DOUBLE PRECISION ONE, ZERO
130  parameter( one = 1.0d+0, zero = 0.0d+0 )
131 * ..
132 * .. Local Scalars ..
133  CHARACTER COLBTOP, ROWBTOP
134  INTEGER IACOL, IAROW, I, ICTXT, IIA, IPT, IPV, IPW,
135  $ IROFF, IV, J, JB, JJA, JL, JN, K, MP, MYCOL,
136  $ MYROW, NPCOL, NPROW
137 * ..
138 * .. Local Arrays ..
139  INTEGER DESCV( DLEN_ )
140 * ..
141 * .. External Subroutines ..
142  EXTERNAL blacs_gridinfo, descset, infog2l, pdlacpy,
143  $ pdlarfb, pdlarft, pdlaset, pb_topget,
144  $ pb_topset
145 * ..
146 * .. External Functions ..
147  INTEGER ICEIL, INDXG2P, NUMROC
148  EXTERNAL iceil, indxg2p, numroc
149 * ..
150 * .. Intrinsic Functions ..
151  INTRINSIC max, min, mod
152 * ..
153 * .. Executable Statements ..
154 *
155 * Get grid parameters
156 *
157  ictxt = desca( ctxt_ )
158  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
159 *
*     IROFF is the row offset of sub( A ) inside its first MB_A block;
*     INFOG2L yields the local coordinates (IIA, JJA) of A( IA, JA )
*     and the process (IAROW, IACOL) that owns it.
160  iroff = mod( ia-1, desca( mb_ ) )
161  CALL infog2l( ia, ja, desca, nprow, npcol, myrow, mycol, iia, jja,
162  $ iarow, iacol )
163  mp = numroc( m+iroff, desca( mb_ ), myrow, iarow, nprow )
*
*     Partition WORK: V at IPV (a local copy of the current block of
*     Householder vectors, MP rows by NB_A columns), then T at IPT
*     (the NB_A-by-NB_A triangular factor), then scratch space at IPW.
*
164  ipv = 1
165  ipt = ipv + mp * desca( nb_ )
166  ipw = ipt + desca( nb_ ) * desca( nb_ )
*
*     Save the current BLACS broadcast topologies and select ones for
*     the sweep below; the saved topologies are restored before RETURN.
*
167  CALL pb_topget( ictxt, 'Broadcast', 'Rowwise', rowbtop )
168  CALL pb_topget( ictxt, 'Broadcast', 'Columnwise', colbtop )
169  CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', 'D-ring' )
170  CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', ' ' )
171 *
*     K is the number of elementary reflectors; JN is the last column
*     of the first (possibly partial) block column, JL the first
*     column of the last block column touched by the factorization.
172  k = min( m, n )
173  jn = min( iceil( ja, desca( nb_ ) ) * desca( nb_ ), ja+k-1 )
174  jl = max( ( (ja+k-2) / desca( nb_ ) ) * desca( nb_ ) + 1, ja )
175 *
*     Descriptor for the (M+IROFF)-by-NB_A workspace V, with its
*     process column aligned to the last block column JL.
*
176  CALL descset( descv, m+iroff, desca( nb_ ), desca( mb_ ),
177  $ desca( nb_ ), iarow, indxg2p( jl, desca( nb_ ),
178  $ mycol, desca( csrc_ ), npcol ), ictxt,
179  $ max( 1, mp ) )
180 *
*     Sweep the block columns in reverse order (rightmost block first,
*     stopping short of the first block, which is handled separately
*     below because it may be a partial block).
*
181  DO 10 j = jl, jn+1, -desca( nb_ )
182  jb = min( ja+k-j, desca( nb_ ) )
183  i = ia + j - ja
184  iv = 1 + j - ja + iroff
185 *
186 * Compute upper triangular matrix T
187 *
188  CALL pdlarft( 'Forward', 'Columnwise', m-i+ia, jb, a, i, j,
189  $ desca, tau, work( ipt ), work( ipw ) )
190 *
191 * Copy Householder vectors into workspace
192 *
193  CALL pdlacpy( 'Lower', m-i+ia, jb, a, i, j, desca, work( ipv ),
194  $ iv, 1, descv )
195  CALL pdlaset( 'Upper', m-i+ia, jb, zero, one, work( ipv ), iv,
196  $ 1, descv )
197 *
198 * Zeroes the strict lower triangular part of sub( A ) to get
199 * block column of R
200 *
201  CALL pdlaset( 'Lower', m-i+ia-1, jb, zero, zero, a, i+1, j,
202  $ desca )
203 *
204 * Apply block Householder transformation
205 *
206  CALL pdlarfb( 'Left', 'No transpose', 'Forward', 'Columnwise',
207  $ m-i+ia, n-j+ja, jb, work( ipv ), iv, 1, descv,
208  $ work( ipt ), a, i, j, desca, work( ipw ) )
209 *
*     Shift V's source process column one to the left so that V stays
*     aligned with the next (previous) block column of A.
210  descv( csrc_ ) = mod( descv( csrc_ ) + npcol - 1, npcol )
211 *
212  10 CONTINUE
213 *
214 * Handle first block separately
215 *
216  jb = jn - ja + 1
217 *
218 * Compute upper triangular matrix T
219 *
220  CALL pdlarft( 'Forward', 'Columnwise', m, jb, a, ia, ja, desca,
221  $ tau, work( ipt ), work( ipw ) )
222 *
223 * Copy Householder vectors into workspace
224 *
225  CALL pdlacpy( 'Lower', m, jb, a, ia, ja, desca, work( ipv ),
226  $ iroff+1, 1, descv )
*     NOTE(review): WORK is passed here where the loop above passes
*     WORK( IPV ); the two are equivalent only because IPV = 1 (set
*     during workspace setup). Confirm and align if the workspace
*     layout is ever changed.
227  CALL pdlaset( 'Upper', m, jb, zero, one, work, iroff+1, 1, descv )
228 *
229 * Zeroes the strict lower triangular part of sub( A ) to get block
230 * column of R
231 *
232  CALL pdlaset( 'Lower', m-1, jb, zero, zero, a, ia+1, ja, desca )
233 *
234 * Apply block Householder transformation
235 *
236  CALL pdlarfb( 'Left', 'No transpose', 'Forward', 'Columnwise', m,
237  $ n, jb, work( ipv ), iroff+1, 1, descv, work( ipt ),
238  $ a, ia, ja, desca, work( ipw ) )
239 *
*     Restore the caller's broadcast topologies.
240  CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', rowbtop )
241  CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', colbtop )
242 *
243  RETURN
244 *
245 * End of PDGEQRRV
246 *
247  END
max
#define max(A, B)
Definition: pcgemr.c:180
infog2l
subroutine infog2l(GRINDX, GCINDX, DESC, NPROW, NPCOL, MYROW, MYCOL, LRINDX, LCINDX, RSRC, CSRC)
Definition: infog2l.f:3
pdlarft
subroutine pdlarft(DIRECT, STOREV, N, K, V, IV, JV, DESCV, TAU, T, WORK)
Definition: pdlarft.f:3
descset
subroutine descset(DESC, M, N, MB, NB, IRSRC, ICSRC, ICTXT, LLD)
Definition: descset.f:3
pdlaset
subroutine pdlaset(UPLO, M, N, ALPHA, BETA, A, IA, JA, DESCA)
Definition: pdblastst.f:6862
pdlarfb
subroutine pdlarfb(SIDE, TRANS, DIRECT, STOREV, M, N, K, V, IV, JV, DESCV, T, C, IC, JC, DESCC, WORK)
Definition: pdlarfb.f:3
pdgeqrrv
subroutine pdgeqrrv(M, N, A, IA, JA, DESCA, TAU, WORK)
Definition: pdgeqrrv.f:2
pdlacpy
subroutine pdlacpy(UPLO, M, N, A, IA, JA, DESCA, B, IB, JB, DESCB)
Definition: pdlacpy.f:3
min
#define min(A, B)
Definition: pcgemr.c:181