
Why are my Compute Shaders so slow?



#1 CryZe   Members   -  Reputation: 768

Posted 29 February 2012 - 05:50 PM

I have written three compute shaders, and two of them have "huge" performance issues. Interestingly, they're the ones where you'd least expect it.

This compute shader takes up to 1024 lights as input, performs per-tile light culling, and calculates the lighting based on the provided GBuffer. I dispatch it with 40x30x1 thread groups, each consisting of 16x16x1 threads. The shader takes about 1.3ms to finish.

cs_5_0
dcl_globalFlags refactoringAllowed
dcl_constantbuffer cb0[6], immediateIndexed
dcl_resource_texture2d (float,float,float,float) t0
dcl_resource_texture2d (float,float,float,float) t1
dcl_resource_texture2d (float,float,float,float) t2
dcl_resource_structured t3, 36
dcl_uav_typed_texture2d (float,float,float,float) u0
dcl_input vThreadIDInGroupFlattened
dcl_input vThreadGroupID.xy
dcl_input vThreadID.xyz
dcl_temps 13
dcl_tgsm_raw g0, 4
dcl_tgsm_structured g1, 4, 1024
dcl_tgsm_raw g2, 4
dcl_tgsm_raw g3, 4
dcl_thread_group 16, 16, 1
store_raw g0.x, l(0), l(0)
store_raw g2.x, l(0), l(-1)
store_raw g3.x, l(0), l(0)
utof r0.xy, vThreadID.xyxx
add r1.xyz, cb0[5].xyzx, l(-1.000000, -1.000000, 255.000000, 0.000000)
div r0.xy, r0.xyxx, r1.xyxx
ld_indexable(texture2d)(float,float,float,float) r2.xyzw, vThreadID.xyzz, t1.xyzw
ld_indexable(texture2d)(float,float,float,float) r3.xyzw, vThreadID.xyzz, t2.xyzw
mad r3.xyz, r3.xyzx, l(2.000000, 2.000000, 2.000000, 0.000000), l(-1.000000, -1.000000, -1.000000, 0.000000)
dp3 r0.z, r3.xyzx, r3.xyzx
rsq r0.z, r0.z
mul r3.xyz, r0.zzzz, r3.xyzx
mul r0.z, r3.w, l(7.213475)
exp r0.z, r0.z
ld_indexable(texture2d)(float,float,float,float) r0.w, vThreadID.xyzz, t0.yzwx
sync_g_t
atomic_umin g2, l(0), r0.w
atomic_umax g3, l(0), r0.w
sync_g_t
ld_raw r1.w, l(0), g2.xxxx
ld_raw r3.w, l(0), g3.xxxx
utof r4.xy, vThreadGroupID.xyxx
mul r4.zw, r4.xxxy, l(0.000000, 0.000000, 16.000000, 16.000000)
div r4.zw, r4.zzzw, r1.xxxy
mad r5.xyzw, r4.xyxy, l(16.000000, 16.000000, 16.000000, 16.000000), l(15.000000, 0.000000, 0.000000, 15.000000)
div r5.xyzw, r5.xyzw, r1.xyxy
mad r4.xy, r4.xyxx, l(16.000000, 16.000000, 0.000000, 0.000000), l(15.000000, 15.000000, 0.000000, 0.000000)
div r1.xy, r4.xyxx, r1.xyxx
add r4.xy, -r4.wzww, l(1.000000, 1.000000, 0.000000, 0.000000)
mul r6.x, r4.x, r4.y
mul r4.xy, r4.xyxx, r4.zwzz
mul r7.xyzw, r4.xxxx, cb0[1].xyzw
mad r6.xyzw, r6.xxxx, cb0[0].xyzw, r7.xyzw
mad r6.xyzw, r4.yyyy, cb0[2].xyzw, r6.xyzw
mul r4.x, r4.w, r4.z
mad r4.xyzw, r4.xxxx, cb0[3].xyzw, r6.xyzw
add r6.xyzw, -r5.yxwz, l(1.000000, 1.000000, 1.000000, 1.000000)
mul r7.xy, r6.xzxx, r6.ywyy
mul r6.xyzw, r5.xyzw, r6.xyzw
mul r8.xyzw, r6.xxxx, cb0[1].xyzw
mad r8.xyzw, r7.xxxx, cb0[0].xyzw, r8.xyzw
mad r8.xyzw, r6.yyyy, cb0[2].xyzw, r8.xyzw
mul r5.xy, r5.ywyy, r5.xzxx
mad r8.xyzw, r5.xxxx, cb0[3].xyzw, r8.xyzw
mul r9.xyzw, r6.zzzz, cb0[1].xyzw
mad r7.xyzw, r7.yyyy, cb0[0].xyzw, r9.xyzw
mad r6.xyzw, r6.wwww, cb0[2].xyzw, r7.xyzw
mad r5.xyzw, r5.yyyy, cb0[3].xyzw, r6.xyzw
add r6.xy, -r1.yxyy, l(1.000000, 1.000000, 0.000000, 0.000000)
mul r6.z, r6.x, r6.y
mul r6.xy, r1.xyxx, r6.xyxx
mul r7.xyzw, r6.xxxx, cb0[1].xyzw
mad r7.xyzw, r6.zzzz, cb0[0].xyzw, r7.xyzw
mad r6.xyzw, r6.yyyy, cb0[2].xyzw, r7.xyzw
mul r1.x, r1.y, r1.x
mad r6.xyzw, r1.xxxx, cb0[3].xyzw, r6.xyzw
mad r7.xyzw, r1.wwww, cb0[4].xyzw, r4.xyzw
div r7.xyz, r7.xyzx, r7.wwww
mad r4.xyzw, r3.wwww, cb0[4].xyzw, r4.xyzw
div r4.xyz, r4.xyzx, r4.wwww
mad r9.xyzw, r1.wwww, cb0[4].xyzw, r8.xyzw
div r9.xyz, r9.xyzx, r9.wwww
mad r8.xyzw, r3.wwww, cb0[4].xyzw, r8.xyzw
div r8.xyz, r8.xyzx, r8.wwww
mad r10.xyzw, r1.wwww, cb0[4].xyzw, r5.xyzw
div r10.xyz, r10.xyzx, r10.wwww
mad r5.xyzw, r3.wwww, cb0[4].xyzw, r5.xyzw
div r5.xyz, r5.xyzx, r5.wwww
mad r11.xyzw, r1.wwww, cb0[4].xyzw, r6.xyzw
div r1.xyw, r11.xyxz, r11.wwww
mad r6.xyzw, r3.wwww, cb0[4].xyzw, r6.xyzw
div r6.xyz, r6.xyzx, r6.wwww
mul r1.z, r1.z, l(0.003906)
utof r3.w, vThreadIDInGroupFlattened.x
mov r4.w, l(0)
loop
  utof r5.w, r4.w
  ge r6.w, r5.w, r1.z
  breakc_nz r6.w
  mad r5.w, r5.w, l(256.000000), r3.w
  ftou r5.w, r5.w
  utof r5.w, r5.w
  min r5.w, r5.w, cb0[5].z
  ftou r5.w, r5.w
  ld_structured_indexable(structured_buffer, stride=36)(mixed,mixed,mixed,mixed) r11.xyz, r5.w, l(0), t3.xyzx
  ld_structured_indexable(structured_buffer, stride=36)(mixed,mixed,mixed,mixed) r6.w, r5.w, l(32), t3.xxxx
  add r12.xyz, r7.xyzx, -r11.xyzx
  dp3 r7.w, r12.xyzx, r12.xyzx
  ge r7.w, r6.w, r7.w
  and r7.w, r7.w, l(1)
  add r12.xyz, r4.xyzx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r12.xyz, r9.xyzx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r12.xyz, r8.xyzx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r12.xyz, r10.xyzx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r12.xyz, r5.xyzx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r12.xyz, r1.xywx, -r11.xyzx
  dp3 r8.w, r12.xyzx, r12.xyzx
  ge r8.w, r6.w, r8.w
  and r8.w, r8.w, l(1)
  or r7.w, r7.w, r8.w
  add r11.xyz, r6.xyzx, -r11.xyzx
  dp3 r8.w, r11.xyzx, r11.xyzx
  ge r6.w, r6.w, r8.w
  and r6.w, r6.w, l(1)
  or r6.w, r6.w, r7.w
  if_nz r6.w
	imm_atomic_iadd r11.x, g0, l(0), l(1)
	store_structured g1.x, r11.x, l(0), r5.w
  endif
  iadd r4.w, r4.w, l(1)
endloop
sync_g_t
add r1.xy, -r0.yxyy, l(1.000000, 1.000000, 0.000000, 0.000000)
mul r1.z, r1.x, r1.y
mul r1.xy, r0.xyxx, r1.xyxx
mul r4.xyzw, r1.xxxx, cb0[1].xyzw
mad r4.xyzw, r1.zzzz, cb0[0].xyzw, r4.xyzw
mad r1.xyzw, r1.yyyy, cb0[2].xyzw, r4.xyzw
mul r0.x, r0.y, r0.x
mad r1.xyzw, r0.xxxx, cb0[3].xyzw, r1.xyzw
mad r1.xyzw, r0.wwww, cb0[4].xyzw, r1.xyzw
div r0.xyw, r1.xyxz, r1.wwww
dp3 r1.x, -r0.xywx, -r0.xywx
rsq r1.x, r1.x
ld_raw r1.y, l(0), g0.xxxx
mov r4.xyz, l(0,0,0,0)
mov r5.xyz, l(0,0,0,0)
mov r1.z, l(0)
loop
  uge r1.w, r1.z, r1.y
  breakc_nz r1.w
  ld_structured r1.w, r1.z, l(0), g1.xxxx
  ld_structured_indexable(structured_buffer, stride=36)(mixed,mixed,mixed,mixed) r6.xyz, r1.w, l(0), t3.xyzx
  ld_structured_indexable(structured_buffer, stride=36)(mixed,mixed,mixed,mixed) r7.xyzw, r1.w, l(16), t3.xyzw
  add r6.xyz, -r0.xywx, r6.xyzx
  dp3 r1.w, r6.xyzx, r6.xyzx
  sqrt r3.w, r1.w
  rsq r1.w, r1.w
  mul r6.xyz, r1.wwww, r6.xyzx
  mad r8.xyz, -r0.xywx, r1.xxxx, r6.xyzx
  dp3 r1.w, r8.xyzx, r8.xyzx
  rsq r1.w, r1.w
  mul r8.xyz, r1.wwww, r8.xyzx
  dp3_sat r1.w, r3.xyzx, r6.xyzx
  div r3.w, r7.w, r3.w
  mul r3.w, r3.w, r3.w
  mul r1.w, r1.w, r3.w
  mul r9.xyz, r7.xyzx, r1.wwww
  mad r5.xyz, r1.wwww, r7.xyzx, r5.xyzx
  dp3_sat r1.w, r3.xyzx, r8.xyzx
  log r1.w, r1.w
  mul r1.w, r0.z, r1.w
  exp r1.w, r1.w
  dp3_sat r3.w, r6.xyzx, r8.xyzx
  mul r4.w, r3.w, r3.w
  mul r3.w, r3.w, r4.w
  div r1.w, r1.w, r3.w
  mad r4.xyz, r1.wwww, r9.xyzx, r4.xyzx
  iadd r1.z, r1.z, l(1)
endloop
add r0.x, r0.z, l(1.000000)
mul r0.x, r2.w, r0.x
mul r0.x, r0.x, l(0.125000)
mul r0.xyz, r4.xyzx, r0.xxxx
mad r0.xyz, r5.xyzx, r2.xyzx, r0.xyzx
mov r0.w, l(1.000000)
store_uav_typed u0.xyzw, vThreadID.xyyy, r0.xyzw
ret
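
At the HLSL level the structure is roughly the following. This is a minimal sketch of the tiled light-culling pattern as reconstructed from the disassembly; the Light layout, the constant buffer, and the LightIntersectsTile/ShadeLight helpers are illustrative assumptions, and the per-tile min/max depth reduction (the g2/g3 atomics above) is omitted:

struct Light                           // 36-byte stride matches t3; exact field layout is a guess
{
    float3 Position;                   // read at offset 0 in the disassembly
    float  Pad;
    float4 ColorAndFalloff;            // read at offset 16
    float  RadiusSq;                   // read at offset 32, compared against squared distance
};

cbuffer Constants : register(b0)
{
    uint LightCount;                   // illustrative; the real cb0 also holds the frustum data
};

StructuredBuffer<Light> Lights : register(t3);
RWTexture2D<float4> Output : register(u0);

groupshared uint visibleLightCount;
groupshared uint visibleLightIndices[1024];

bool LightIntersectsTile(Light l)         { /* sphere vs. tile frustum test goes here */ return true; }
float3 ShadeLight(Light l, uint2 pixel)   { /* GBuffer read + BRDF goes here */ return l.ColorAndFalloff.rgb; }

[numthreads(16, 16, 1)]
void CSMain(uint3 dispatchThreadId : SV_DispatchThreadID,
            uint groupIndex : SV_GroupIndex)
{
    if (groupIndex == 0)
        visibleLightCount = 0;
    GroupMemoryBarrierWithGroupSync();

    // Phase 1: each of the 256 threads culls a strided slice of the light list
    // (the stride of 256 matches the "mad ..., l(256.000000), ..." in the loop above)
    for (uint i = groupIndex; i < LightCount; i += 16 * 16)
    {
        if (LightIntersectsTile(Lights[i]))
        {
            uint slot;
            InterlockedAdd(visibleLightCount, 1, slot);   // imm_atomic_iadd on g0
            visibleLightIndices[slot] = i;                // store_structured into g1
        }
    }
    GroupMemoryBarrierWithGroupSync();

    // Phase 2: shade this pixel using only the lights that survived culling
    float3 lighting = 0;
    for (uint j = 0; j < visibleLightCount; ++j)
        lighting += ShadeLight(Lights[visibleLightIndices[j]], dispatchThreadId.xy);

    Output[dispatchThreadId.xy] = float4(lighting, 1.0f);
}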

Now I have another compute shader. This one takes 4 images as input, performs a simple per-pixel multiplication of "complex colors" (colors where each component is a complex number rather than just a real number), and writes the result into 2 UAVs. I dispatch it with 16x16x1 thread groups, each consisting of 32x32x1 threads. This shader takes about 1.1ms to finish.

cs_5_0
dcl_globalFlags refactoringAllowed
dcl_resource_texture2d (float,float,float,float) t0
dcl_resource_texture2d (float,float,float,float) t1
dcl_resource_texture2d (float,float,float,float) t2
dcl_resource_texture2d (float,float,float,float) t3
dcl_uav_typed_texture2d (float,float,float,float) u0
dcl_uav_typed_texture2d (float,float,float,float) u1
dcl_input vThreadID.xy
dcl_temps 5
dcl_thread_group 32, 32, 1
mov r0.xy, vThreadID.xyxx
mov r0.zw, l(0,0,0,0)
ld_indexable(texture2d)(float,float,float,float) r1.xyzw, r0.xyww, t1.xyzw
ld_indexable(texture2d)(float,float,float,float) r2.xyzw, r0.xyzw, t3.xyzw
mul r3.xyzw, r1.xyzw, r2.xyzw
ld_indexable(texture2d)(float,float,float,float) r4.xyzw, r0.xyww, t0.xyzw
ld_indexable(texture2d)(float,float,float,float) r0.xyzw, r0.xyww, t2.xyzw
mad r3.xyzw, r4.xyzw, r0.xyzw, -r3.xyzw
mul r0.xyzw, r1.xyzw, r0.xyzw
mad r0.xyzw, r4.xyzw, r2.xyzw, r0.xyzw
store_uav_typed u1.xyzw, vThreadID.xyyy, r0.xyzw
store_uav_typed u0.xyzw, vThreadID.xyyy, r3.xyzw
ret
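
In HLSL terms the whole shader is a single componentwise complex multiply per pixel, (a + bi)(c + di) = (ac - bd) + (ad + bc)i. A minimal sketch, assuming t0/t1 hold the real/imaginary parts of the first image and t2/t3 those of the second (which is how the disassembly reads them); the names are illustrative:

Texture2D<float4> RealA : register(t0);
Texture2D<float4> ImagA : register(t1);
Texture2D<float4> RealB : register(t2);
Texture2D<float4> ImagB : register(t3);
RWTexture2D<float4> RealOut : register(u0);
RWTexture2D<float4> ImagOut : register(u1);

[numthreads(32, 32, 1)]
void CSMain(uint3 id : SV_DispatchThreadID)
{
    float4 a = RealA[id.xy];   // Re(A)
    float4 b = ImagA[id.xy];   // Im(A)
    float4 c = RealB[id.xy];   // Re(B)
    float4 d = ImagB[id.xy];   // Im(B)

    // (a + bi)(c + di) = (ac - bd) + (ad + bc)i, per color channel
    RealOut[id.xy] = a * c - b * d;
    ImagOut[id.xy] = a * d + b * c;
}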

This is weird enough: the first one should be way slower than the second one, but it is only about 18% slower, even though it has 188 instructions and multiple loops whereas the second one has just 13 instructions and no loops.

Now let's take a look at the actual problematic shader. I've implemented a simple iterative fast Fourier transform in a compute shader. I dispatch it with 1x512x1 thread groups, each consisting of 512x1x1 threads. It performs 9 unrolled iterations (log2(512) = 9 butterfly passes) that all work on groupshared memory. It takes a single SRV as input and writes to two UAVs (one for the real part of the colors, one for the imaginary part). This shader has just 160 instructions, even fewer than the first one, and no loops, yet it takes far longer.

cs_5_0
dcl_globalFlags refactoringAllowed
dcl_resource_texture2d (float,float,float,float) t0
dcl_uav_typed_texture2d (float,float,float,float) u0
dcl_uav_typed_texture2d (float,float,float,float) u1
dcl_input vThreadID.xy
dcl_temps 11
dcl_tgsm_structured g0, 32, 512
dcl_thread_group 512, 1, 1
mov r0.xy, vThreadID.xyxx
mov r0.zw, l(0,0,0,0)
ld_indexable(texture2d)(float,float,float,float) r0.xyzw, r0.xyzw, t0.xyzw
bfrev r1.x, vThreadID.x
ushr r2.x, r1.x, l(23)
store_structured g0.xyzw, r2.x, l(0), r0.xyzw
store_structured g0.xyzw, r2.x, l(16), l(0,0,0,0)
sync_g_t
ubfe r0.xyzw, l(1, 2, 3, 4), l(23, 23, 23, 23), r1.xxxx
ubfe r1.xyzw, l(5, 6, 7, 8), l(23, 23, 23, 23), r1.xxxx
iadd r3.xyzw, r2.xxxx, -r0.xyzw
ld_structured r4.xyzw, r3.x, l(0), g0.xyzw
ld_structured r5.xyzw, r3.x, l(16), g0.xyzw
iadd r3.x, r3.x, l(1)
iadd r3.yzw, r3.yyzw, r0.xxyz
ld_structured r6.xyzw, r3.x, l(0), g0.xyzw
ld_structured r7.xyzw, r3.x, l(16), g0.xyzw
sync_g_t
ult r8.xyzw, r0.xyzw, l(1, 2, 4, 8)
movc r8.xyzw, r8.xyzw, l(1.000000,1.000000,1.000000,1.000000), l(-1.000000,-1.000000,-1.000000,-1.000000)
mad r4.xyzw, r8.xxxx, r6.xyzw, r4.xyzw
store_structured g0.xyzw, r2.x, l(0), r4.xyzw
mad r4.xyzw, r8.xxxx, r7.xyzw, r5.xyzw
store_structured g0.xyzw, r2.x, l(16), r4.xyzw
sync_g_t
ld_structured r4.xyzw, r3.y, l(0), g0.xyzw
ld_structured r5.xyzw, r3.y, l(16), g0.xyzw
iadd r6.xyz, r3.yzwy, l(2, 4, 8, 0)
ld_structured r7.xyzw, r6.x, l(0), g0.xyzw
ld_structured r9.xyzw, r6.x, l(16), g0.xyzw
sync_g_t
utof r10.xyzw, r0.xyzw
mul r10.xyzw, r10.xyzw, l(-1.570796, -0.785398, -0.392699, -0.196350)
sincos r0.x, r3.x, r10.x
mov r3.y, r0.x
mul r0.xy, r8.yyyy, r3.xyxx
mad r4.xyzw, r0.xxxx, r7.xyzw, r4.xyzw
mad r4.xyzw, -r0.yyyy, r9.xyzw, r4.xyzw
mad r5.xyzw, r0.xxxx, r9.xyzw, r5.xyzw
mad r5.xyzw, r0.yyyy, r7.xyzw, r5.xyzw
store_structured g0.xyzw, r2.x, l(0), r4.xyzw
store_structured g0.xyzw, r2.x, l(16), r5.xyzw
sync_g_t
ld_structured r4.xyzw, r3.z, l(0), g0.xyzw
ld_structured r5.xyzw, r3.z, l(16), g0.xyzw
ld_structured r7.xyzw, r6.y, l(0), g0.xyzw
ld_structured r9.xyzw, r6.y, l(16), g0.xyzw
sync_g_t
sincos r0.x, r3.x, r10.y
mov r3.y, r0.x
mul r0.xy, r8.zzzz, r3.xyxx
mad r4.xyzw, r0.xxxx, r7.xyzw, r4.xyzw
mad r4.xyzw, -r0.yyyy, r9.xyzw, r4.xyzw
mad r5.xyzw, r0.xxxx, r9.xyzw, r5.xyzw
mad r5.xyzw, r0.yyyy, r7.xyzw, r5.xyzw
store_structured g0.xyzw, r2.x, l(0), r4.xyzw
store_structured g0.xyzw, r2.x, l(16), r5.xyzw
sync_g_t
ld_structured r4.xyzw, r3.w, l(0), g0.xyzw
ld_structured r3.xyzw, r3.w, l(16), g0.xyzw
ld_structured r5.xyzw, r6.z, l(0), g0.xyzw
ld_structured r6.xyzw, r6.z, l(16), g0.xyzw
sync_g_t
sincos r0.x, r7.x, r10.z
sincos r8.x, r9.x, r10.w
mov r7.y, r0.x
mul r0.xy, r8.wwww, r7.xyxx
mad r4.xyzw, r0.xxxx, r5.xyzw, r4.xyzw
mad r4.xyzw, -r0.yyyy, r6.xyzw, r4.xyzw
mad r3.xyzw, r0.xxxx, r6.xyzw, r3.xyzw
mad r3.xyzw, r0.yyyy, r5.xyzw, r3.xyzw
store_structured g0.xyzw, r2.x, l(0), r4.xyzw
store_structured g0.xyzw, r2.x, l(16), r3.xyzw
sync_g_t
iadd r3.xyzw, r2.xxxx, -r1.xyzw
iadd r0.x, r3.x, r0.w
iadd r0.yzw, r3.yyzw, r1.xxyz
ld_structured r3.xyzw, r0.x, l(0), g0.xyzw
ld_structured r4.xyzw, r0.x, l(16), g0.xyzw
iadd r0.x, r0.x, l(16)
ld_structured r5.xyzw, r0.x, l(0), g0.xyzw
ld_structured r6.xyzw, r0.x, l(16), g0.xyzw
sync_g_t
mov r9.y, r8.x
ult r7.xyzw, r1.xyzw, l(16, 32, 64, 128)
movc r7.xyzw, r7.xyzw, l(1.000000,1.000000,1.000000,1.000000), l(-1.000000,-1.000000,-1.000000,-1.000000)
mul r8.xy, r7.xxxx, r9.xyxx
mad r3.xyzw, r8.xxxx, r5.xyzw, r3.xyzw
mad r3.xyzw, -r8.yyyy, r6.xyzw, r3.xyzw
mad r4.xyzw, r8.xxxx, r6.xyzw, r4.xyzw
mad r4.xyzw, r8.yyyy, r5.xyzw, r4.xyzw
store_structured g0.xyzw, r2.x, l(0), r3.xyzw
store_structured g0.xyzw, r2.x, l(16), r4.xyzw
sync_g_t
ld_structured r3.xyzw, r0.y, l(0), g0.xyzw
ld_structured r4.xyzw, r0.y, l(16), g0.xyzw
iadd r5.xyz, r0.yzwy, l(32, 64, 128, 0)
ld_structured r6.xyzw, r5.x, l(0), g0.xyzw
ld_structured r8.xyzw, r5.x, l(16), g0.xyzw
sync_g_t
utof r9.xyzw, r1.xyzw
mul r9.xyzw, r9.xyzw, l(-0.098175, -0.049087, -0.024544, -0.012272)
sincos r0.x, r1.x, r9.x
mov r1.y, r0.x
mul r0.xy, r7.yyyy, r1.xyxx
mad r3.xyzw, r0.xxxx, r6.xyzw, r3.xyzw
mad r3.xyzw, -r0.yyyy, r8.xyzw, r3.xyzw
mad r4.xyzw, r0.xxxx, r8.xyzw, r4.xyzw
mad r4.xyzw, r0.yyyy, r6.xyzw, r4.xyzw
store_structured g0.xyzw, r2.x, l(0), r3.xyzw
store_structured g0.xyzw, r2.x, l(16), r4.xyzw
sync_g_t
ld_structured r3.xyzw, r0.z, l(0), g0.xyzw
ld_structured r4.xyzw, r0.z, l(16), g0.xyzw
ld_structured r6.xyzw, r5.y, l(0), g0.xyzw
ld_structured r8.xyzw, r5.y, l(16), g0.xyzw
sync_g_t
sincos r0.x, r1.x, r9.y
mov r1.y, r0.x
mul r0.xy, r7.zzzz, r1.xyxx
mad r3.xyzw, r0.xxxx, r6.xyzw, r3.xyzw
mad r3.xyzw, -r0.yyyy, r8.xyzw, r3.xyzw
mad r4.xyzw, r0.xxxx, r8.xyzw, r4.xyzw
mad r4.xyzw, r0.yyyy, r6.xyzw, r4.xyzw
store_structured g0.xyzw, r2.x, l(0), r3.xyzw
store_structured g0.xyzw, r2.x, l(16), r4.xyzw
sync_g_t
ld_structured r3.xyzw, r0.w, l(0), g0.xyzw
ld_structured r0.xyzw, r0.w, l(16), g0.xyzw
ld_structured r4.xyzw, r5.z, l(0), g0.xyzw
ld_structured r5.xyzw, r5.z, l(16), g0.xyzw
sync_g_t
sincos r1.x, r6.x, r9.z
sincos r7.x, r8.x, r9.w
mov r6.y, r1.x
mul r1.xy, r7.wwww, r6.xyxx
mad r3.xyzw, r1.xxxx, r4.xyzw, r3.xyzw
mad r3.xyzw, -r1.yyyy, r5.xyzw, r3.xyzw
mad r0.xyzw, r1.xxxx, r5.xyzw, r0.xyzw
mad r0.xyzw, r1.yyyy, r4.xyzw, r0.xyzw
store_structured g0.xyzw, r2.x, l(0), r3.xyzw
store_structured g0.xyzw, r2.x, l(16), r0.xyzw
sync_g_t
ld_structured r0.xyzw, r1.w, l(0), g0.xyzw
iadd r1.x, r1.w, l(256)
ld_structured r3.xyzw, r1.w, l(16), g0.xyzw
ld_structured r4.xyzw, r1.x, l(0), g0.xyzw
ld_structured r1.xyzw, r1.x, l(16), g0.xyzw
mov r8.y, r7.x
ult r5.x, r2.x, l(256)
movc r5.x, r5.x, l(1.000000), l(-1.000000)
mul r5.xy, r5.xxxx, r8.xyxx
mad r0.xyzw, r5.xxxx, r4.xyzw, r0.xyzw
mad r0.xyzw, -r5.yyyy, r1.xyzw, r0.xyzw
mad r1.xyzw, r5.xxxx, r1.xyzw, r3.xyzw
mad r1.xyzw, r5.yyyy, r4.xyzw, r1.xyzw
mov r2.yzw, vThreadID.yyyy
store_uav_typed u0.xyzw, r2.xwww, r0.xyzw
store_uav_typed u1.xyzw, r2.xyzw, r1.xyzw
ret

It takes 15ms...

I just can't imagine what the problem could be. I even wrote a version of this shader where most of the values are loaded from a precomputed buffer, but that made no difference in performance at all.


#2 MJP   Moderators   -  Reputation: 10106


Posted 29 February 2012 - 08:56 PM

How are you measuring the performance of these shaders?

#3 CryZe   Members   -  Reputation: 768


Posted 01 March 2012 - 06:59 AM

I'm using Intel Graphics Performance Analyzer (GPA). When analyzing a frame with it, I see 4 extreme peaks. The FFT shader is dispatched 4 times per frame, which results in a frame time of 64ms (~15FPS). When I disable those 4 dispatch calls, the frame time drops to just 4ms (~250FPS). Even without Intel GPA, just from the FPS difference you can calculate that each dispatch of this compute shader takes about 15ms.

Here's a screenshot of the statistics that Intel GPA measured:
[screenshot: Intel GPA frame statistics]
5 is the light accumulation shader (the first shader), 9 is the convolution shader (the second shader), and 7, 8, 10 and 11 are the FFT shader (the third shader).

#4 CryZe   Members   -  Reputation: 768


Posted 03 March 2012 - 03:29 PM

I still couldn't figure out what the cause of the problem could be. Here's the source of the shader:

struct Complex4
{
  float4 Real;
  float4 Imaginary;
};

Texture2D<float4> SourceRealBuffer : register(t0);
RWTexture2D<float4> RealBuffer : register(u0);
RWTexture2D<float4> ImaginaryBuffer : register(u1);

groupshared Complex4 values[ELEMENTS];

uint revert(uint index)
{
  return reversebits(index) >> (32 - LOG_ELEMENTS);
}

uint3 getSamplePosition(uint3 dispatchThreadId)
{
  return dispatchThreadId;
}

uint3 getIndexPosition(uint3 samplePosition, uint index)
{
  return uint3(index, samplePosition.yz);
}

float getExponentFactor()
{
  return -6.2831853f;
}

Complex4 getWeightedResult(Complex4 value)
{
  return value;
}

Complex4 getValue(uint3 samplePosition)
{
  Complex4 value = (Complex4)0;
  value.Real = SourceRealBuffer[samplePosition.xy];
  return value;
}

void setValue(uint3 savePosition, Complex4 value)
{
  RealBuffer[savePosition.xy] = value.Real;
  ImaginaryBuffer[savePosition.xy] = value.Imaginary;
}


[numthreads(ELEMENTS,1,1)]
void CSMain(
   uint3 groupId : SV_GroupID,
   uint3 groupThreadId : SV_GroupThreadID,
   uint groupIndex: SV_GroupIndex,
   uint3 dispatchThreadId : SV_DispatchThreadID)
{
  uint3 samplePosition = getSamplePosition(dispatchThreadId);
	
  //Revert the indices and load the values into groupshared memory
  uint index = revert(dispatchThreadId.x);
  values[index] = getValue(samplePosition);

  //Sync to ensure that all the values are loaded
  GroupMemoryBarrierWithGroupSync();

  uint d = 1;
  uint dHalf = 0;

  [unroll]
  for (uint n=1; n <= LOG_ELEMENTS; ++n) //Iterate over all the FFTs
  {
	//The size of the FFT
	dHalf = d;
	d <<= 1;

	//Compute temp values
	uint k = index % d;
	uint expK = k % dHalf;
	uint m = index - k + expK;

	//Compute w, the factor for the odd values
	float exponent = getExponentFactor() * expK / d;
	float2 w = 0;
	sincos(exponent, w.y, w.x);
	w *= (k < dHalf) ? 1.0f : -1.0f;

	//Read the values
	Complex4 even = values[m];
	Complex4 odd = values[m + dHalf];

	//Sync to ensure that every value is read before writing the results
	GroupMemoryBarrierWithGroupSync();

	//Write results
	//y = even + w * odd

	//Real madd: Re(a+b*c) = Re(a)+Re(b)*Re(c)-Im(b)*Im(c)
	values[index].Real = even.Real + w.x * odd.Real - w.y * odd.Imaginary;

	//Imaginary madd: Im(a+b*c) = Im(a)+Re(b)*Im(c)+Im(b)*Re(c)
	values[index].Imaginary = even.Imaginary + w.x * odd.Imaginary + w.y * odd.Real;

	//Sync to ensure that every value is written before calculating the other FFTs
	GroupMemoryBarrierWithGroupSync();
  }
  
  //Write the results
  uint3 savePosition = getIndexPosition(samplePosition, index);
  setValue(savePosition, getWeightedResult(values[index]));
}


#5 MJP   Moderators   -  Reputation: 10106


Posted 03 March 2012 - 05:42 PM

Yeah, that is pretty weird. Taking a quick look at your shader, you do have quite a bit of shared memory usage, both in terms of accessing it and the amount that you allocate (16K, it looks like). This is a bit of a shot in the dark, but you could try using half precision when storing values in shared memory to cut your usage in half. You can use f32tof16 and f16tof32 to do the conversions.
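
For example, the Complex4 from the post above (eight floats, 32 bytes) could be packed into a uint4 (16 bytes) along these lines. A minimal sketch, assuming the Complex4 struct and the ELEMENTS define from that post; f32tof16 and f16tof32 are real HLSL intrinsics, but the pack/unpack helpers are hypothetical:

groupshared uint4 packedValues[ELEMENTS];   // 8KB of groupshared memory instead of 16KB

uint4 packComplex4(Complex4 v)
{
    // f32tof16 returns the half-precision bit pattern in the low 16 bits of a uint,
    // so two halves fit into each uint
    return uint4(
        f32tof16(v.Real.x)      | (f32tof16(v.Real.y)      << 16),
        f32tof16(v.Real.z)      | (f32tof16(v.Real.w)      << 16),
        f32tof16(v.Imaginary.x) | (f32tof16(v.Imaginary.y) << 16),
        f32tof16(v.Imaginary.z) | (f32tof16(v.Imaginary.w) << 16));
}

Complex4 unpackComplex4(uint4 p)
{
    // f16tof32 converts the low 16 bits of its argument back to float
    Complex4 v;
    v.Real      = float4(f16tof32(p.x), f16tof32(p.x >> 16),
                         f16tof32(p.y), f16tof32(p.y >> 16));
    v.Imaginary = float4(f16tof32(p.z), f16tof32(p.z >> 16),
                         f16tof32(p.w), f16tof32(p.w >> 16));
    return v;
}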

#6 CryZe   Members   -  Reputation: 768


Posted 04 March 2012 - 07:04 AM

I've tried using half-precision floats before, but the resulting image looked horrible. Maybe I could try scaling up all the values while in frequency space to get more precision in the lower values.

#7 CryZe   Members   -  Reputation: 768


Posted 11 March 2012 - 06:05 AM

After looking through NVIDIA's implementation of the FFT, I noticed that they were only using 128 threads per thread group. I realized that my graphics card might not have enough shader cores (400) to run all 512 threads in parallel. So every time the threads need to be synchronized, the driver has to reorganize the workload on the shader cores so that all 512 threads can reach the barrier.
So I tried implementing the FFT a bit more iteratively, so that it also uses only 128 threads per group. And indeed, the shader is almost twice as fast: it only needs 8.6ms now.

All 4 dispatches of the shader now take 34.6ms in total. If I reduced the resolution to 256x256, all 4 dispatches together would take only 7.68ms. But I'm still looking for ways to improve it before reducing the resolution...

Here's the current shader:

[numthreads(ELEMENTS/ITERATION_FACTOR,1,1)]
void CSMain(
  uint3 groupId : SV_GroupID,
  uint3 groupThreadId : SV_GroupThreadID,
  uint groupIndex: SV_GroupIndex,
  uint3 dispatchThreadId : SV_DispatchThreadID)
{
  Complex4 tempComplexValues[ITERATION_FACTOR];

  uint index = dispatchThreadId.x;

  //Revert the indices and load the values into groupshared memory
  [unroll] //Do some FFTs iterative instead of parallel
  for (uint i = 0, iterationIndex = index; i < ITERATION_FACTOR; ++i, iterationIndex += ELEMENTS/ITERATION_FACTOR)
  {
    values[revert(iterationIndex)] = getValue(getSamplePosition(uint3(iterationIndex, dispatchThreadId.yz)));
  }

  //Sync to ensure that all the values are loaded
  GroupMemoryBarrierWithGroupSync();

  uint d = 1;
  uint dHalf = 0;

  [unroll] //Iterate over all the FFTs
  for (uint n = 1; n <= LOG_ELEMENTS; ++n)
  {
    //The size of the FFT
    dHalf = d;
    d <<= 1;

    [unroll] //Do some FFTs iterative instead of parallel
    for (uint i = 0, iterationIndex = index; i < ITERATION_FACTOR; ++i, iterationIndex += ELEMENTS/ITERATION_FACTOR)
    {
      //Compute temp values
      uint k = iterationIndex & (d - 1);
      uint expK = k & (dHalf - 1);
      uint m = iterationIndex - k + expK;

      //Compute w, the factor for the odd values
      float exponent = getExponentFactor() * expK / d;
      float2 w = 0;
      sincos(exponent, w.y, w.x);
      w *= (k < dHalf) ? 1.0f : -1.0f;

      //Read the values
      Complex4 even = values[m];
      Complex4 odd = values[m + dHalf];

      //Write results into temp values
      //y = even + w * odd

      //Real madd: Re(a+b*c) = Re(a)+Re(b)*Re(c)-Im(b)*Im(c)
      tempComplexValues[i].Real = even.Real + w.x * odd.Real - w.y * odd.Imaginary;

      //Imaginary madd: Im(a+b*c) = Im(a)+Re(b)*Im(c)+Im(b)*Re(c)
      tempComplexValues[i].Imaginary = even.Imaginary + w.x * odd.Imaginary + w.y * odd.Real;
    }

    //Sync to ensure that every value is read before writing the results
    GroupMemoryBarrierWithGroupSync();

    //Write the results into groupshared memory
    [unroll] //Do some FFTs iterative instead of parallel
    for (uint i = 0, iterationIndex = index; i < ITERATION_FACTOR; ++i, iterationIndex += ELEMENTS/ITERATION_FACTOR)
    {
      values[iterationIndex] = tempComplexValues[i];
    }

    //Sync to ensure that every value is written before calculating the other FFTs
    GroupMemoryBarrierWithGroupSync();
  }

  //Write the results
  [unroll] //Do some FFTs iterative instead of parallel
  for (uint i = 0, iterationIndex = index; i < ITERATION_FACTOR; ++i, iterationIndex += ELEMENTS/ITERATION_FACTOR)
  {
    uint3 savePosition = getIndexPosition(getSamplePosition(dispatchThreadId), iterationIndex);
    setValue(savePosition, getWeightedResult(values[iterationIndex]));
  }
}




