diff llvm/test/CodeGen/AMDGPU/addrspacecast.ll @ 252:1f2b6ac9f198 llvm-original

LLVM16-1
author Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date Fri, 18 Aug 2023 09:04:13 +0900
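description This changeset tracks the LLVM 16 merge of llvm/test/CodeGen/AMDGPU/addrspacecast.ll: the tests migrate from typed pointers (i32 addrspace(N)*) to opaque pointers (ptr addrspace(N)), the GFX9 checks match the src_shared_base and src_private_base aperture registers instead of the old s_getreg_b32 plus s_lshl_b32 sequence, and the --amdhsa-code-object-version=2 RUN-line option is replaced by the amdgpu_code_object_version module flag.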
parents c4bab56944e8
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll	Wed Nov 09 17:47:54 2022 +0900
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll	Fri Aug 18 09:04:13 2023 +0900
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri --amdhsa-code-object-version=2 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=CI %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=2 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=GFX9 %s
 
 ; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
 ; HSA: enable_sgpr_private_segment_buffer = 1
@@ -13,15 +13,14 @@
 ; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
 ; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
 
+; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base
+
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
 ; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
-; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
 
-; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
 ; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[SSRC_SHARED_BASE]], 0
-; GFX9-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
+; GFX9-DAG: s_cselect_b32 s[[HI:[0-9]+]], s[[HIBASE]], 0
+; GFX9-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
 
 ; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
 
@@ -29,9 +28,9 @@
 ; number SGPR.
 
 ; HSA: NumSgprs: {{[0-9]+}}
-define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32*
-  store volatile i32 7, i32* %stof
+define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr
+  store volatile i32 7, ptr %stof
   ret void
 }
 
@@ -43,20 +42,19 @@
 ; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
 ; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0
 
+; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base
+
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
-; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]
 
-; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
+; GFX9-DAG: v_mov_b32_e32 v[[VREG_HIBASE:[0-9]+]], s[[HIBASE]]
 ; GFX9-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
 ; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0, vcc
-; GFX9-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
+; GFX9-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, v[[VREG_HIBASE]], vcc
 
 ; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
-define void @use_group_to_flat_addrspacecast_func(i32 addrspace(3)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32*
-  store volatile i32 7, i32* %stof
+define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr
+  store volatile i32 7, ptr %stof
   ret void
 }
 
@@ -75,22 +73,19 @@
 ; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
 
 ; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
-; GFX9-DAG: s_getreg_b32 [[SSRC_PRIVATE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_PRIVATE_BASE:s[0-9]+]], [[SSRC_PRIVATE]], 16
-
-; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base
+; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_private_base
 
 ; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
 ; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b32 s[[HI:[0-9]+]], [[SSRC_PRIVATE_BASE]], 0
-; GFX9: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
+; GFX9: s_cselect_b32 s[[HI:[0-9]+]], s[[HIBASE]], 0
+; GFX9: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0
 
 ; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
 
 ; HSA: NumSgprs: {{[0-9]+}}
-define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32 addrspace(5)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(5)* %ptr to i32*
-  store volatile i32 7, i32* %stof
+define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(5) %ptr to ptr
+  store volatile i32 7, ptr %stof
   ret void
 }
 
@@ -103,9 +98,9 @@
 ; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
 ; HSA: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]
-define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(1)* %ptr to i32*
-  store volatile i32 7, i32* %stof
+define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(1) %ptr to ptr
+  store volatile i32 7, ptr %stof
   ret void
 }
 
@@ -115,9 +110,9 @@
 ; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
 ; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
 ; HSA: flat_load_dword v{{[0-9]+}}, v[[[VPTRLO]]:[[VPTRHI]]]
-define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(4)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(4)* %ptr to i32*
-  %ld = load volatile i32, i32* %stof
+define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(4) %ptr to ptr
+  %ld = load volatile i32, ptr %stof
   ret void
 }
 
@@ -129,9 +124,9 @@
 
 ; GFX9: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
 ; GFX9: global_load_dword v{{[0-9]+}}, [[ZERO:v[0-9]+]], s[[[PTRLO]]:[[PTRHI]]]
-define amdgpu_kernel void @use_constant_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(1)*
-  %ld = load volatile i32, i32 addrspace(1)* %stof
+define amdgpu_kernel void @use_constant_to_global_addrspacecast(ptr addrspace(4) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(4) %ptr to ptr addrspace(1)
+  %ld = load volatile i32, ptr addrspace(1) %stof
   ret void
 }
 
@@ -151,9 +146,9 @@
 ; GFX9-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
 ; CI-DAG: ds_write_b32 [[VCASTPTR]], v[[K]]
 ; GFX9-DAG: ds_write_b32 [[CASTPTR]], v[[K]]
-define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32* %ptr) #0 {
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(3)*
-  store volatile i32 0, i32 addrspace(3)* %ftos
+define amdgpu_kernel void @use_flat_to_group_addrspacecast(ptr %ptr) #0 {
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(3)
+  store volatile i32 0, ptr addrspace(3) %ftos
   ret void
 }
 
@@ -176,9 +171,9 @@
 ; GFX9-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
 ; CI: buffer_store_dword v[[K]], [[VCASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
 ; GFX9: buffer_store_dword v[[K]], [[CASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
-define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32* %ptr) #0 {
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(5)*
-  store volatile i32 0, i32 addrspace(5)* %ftos
+define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) #0 {
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(5)
+  store volatile i32 0, ptr addrspace(5) %ftos
   ret void
 }
 
@@ -193,9 +188,9 @@
 
 ; GFX9: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
 ; GFX9: global_store_dword [[ZERO]], [[ZERO]], s[[[PTRLO]]:[[PTRHI]]{{\]$}}
-define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32* %ptr) #0 {
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(1)*
-  store volatile i32 0, i32 addrspace(1)* %ftos
+define amdgpu_kernel void @use_flat_to_global_addrspacecast(ptr %ptr) #0 {
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(1)
+  store volatile i32 0, ptr addrspace(1) %ftos
   ret void
 }
 
@@ -204,27 +199,24 @@
 
 ; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]], s[4:5], 0x0
 ; HSA: s_load_dword s{{[0-9]+}}, s[[[PTRLO]]:[[PTRHI]]], 0x0
-define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32* %ptr) #0 {
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(4)*
-  load volatile i32, i32 addrspace(4)* %ftos
+define amdgpu_kernel void @use_flat_to_constant_addrspacecast(ptr %ptr) #0 {
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(4)
+  load volatile i32, ptr addrspace(4) %ftos
   ret void
 }
 
 ; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:
 ; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
 ; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
-; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
-; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SSRC_SHARED_BASE]]
 
-; GFX9-XXX: v_mov_b32_e32 v[[HI:[0-9]+]], src_shared_base
+; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_shared_base
 
 ; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
 ; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
 define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
-  %cast = addrspacecast i32 addrspace(3)* null to i32*
-  store volatile i32 7, i32* %cast
+  %cast = addrspacecast ptr addrspace(3) null to ptr
+  store volatile i32 7, ptr %cast
   ret void
 }
 
@@ -233,8 +225,8 @@
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
 ; HSA: ds_write_b32 [[PTR]], [[K]]
 define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
-  %cast = addrspacecast i32* null to i32 addrspace(3)*
-  store volatile i32 7, i32 addrspace(3)* %cast
+  %cast = addrspacecast ptr null to ptr addrspace(3)
+  store volatile i32 7, ptr addrspace(3) %cast
   ret void
 }
 
@@ -244,8 +236,8 @@
 ; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
 ; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
 define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
-  %cast = addrspacecast i32 addrspace(3)* inttoptr (i32 -1 to i32 addrspace(3)*) to i32*
-  store volatile i32 7, i32* %cast
+  %cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr
+  store volatile i32 7, ptr %cast
   ret void
 }
 
@@ -254,8 +246,8 @@
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
 ; HSA: ds_write_b32 [[PTR]], [[K]]
 define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {
-  %cast = addrspacecast i32* inttoptr (i64 -1 to i32*) to i32 addrspace(3)*
-  store volatile i32 7, i32 addrspace(3)* %cast
+  %cast = addrspacecast ptr inttoptr (i64 -1 to ptr) to ptr addrspace(3)
+  store volatile i32 7, ptr addrspace(3) %cast
   ret void
 }
 
@@ -263,18 +255,15 @@
 ; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
 ; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
 ; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
-; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
-; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SSRC_SHARED_BASE]]
 
-; GFX9-XXX: v_mov_b32_e32 v[[HI:[0-9]+]], src_shared_base
+; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_private_base
 
 ; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
 ; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
 define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
-  %cast = addrspacecast i32 addrspace(5)* null to i32*
-  store volatile i32 7, i32* %cast
+  %cast = addrspacecast ptr addrspace(5) null to ptr
+  store volatile i32 7, ptr %cast
   ret void
 }
 
@@ -283,8 +272,8 @@
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
 ; HSA: buffer_store_dword [[K]], [[PTR]], s{{\[[0-9]+:[0-9]+\]}}, 0
 define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {
-  %cast = addrspacecast i32* null to i32 addrspace(5)*
-  store volatile i32 7, i32 addrspace(5)* %cast
+  %cast = addrspacecast ptr null to ptr addrspace(5)
+  store volatile i32 7, ptr addrspace(5) %cast
   ret void
 }
 
@@ -298,8 +287,8 @@
 ; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
 ; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
 define amdgpu_kernel void @cast_neg1_private_to_flat_addrspacecast() #0 {
-  %cast = addrspacecast i32 addrspace(5)* inttoptr (i32 -1 to i32 addrspace(5)*) to i32*
-  store volatile i32 7, i32* %cast
+  %cast = addrspacecast ptr addrspace(5) inttoptr (i32 -1 to ptr addrspace(5)) to ptr
+  store volatile i32 7, ptr %cast
   ret void
 }
 
@@ -308,8 +297,8 @@
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
 ; HSA: buffer_store_dword [[K]], [[PTR]], s{{\[[0-9]+:[0-9]+\]}}, 0
 define amdgpu_kernel void @cast_neg1_flat_to_private_addrspacecast() #0 {
-  %cast = addrspacecast i32* inttoptr (i64 -1 to i32*) to i32 addrspace(5)*
-  store volatile i32 7, i32 addrspace(5)* %cast
+  %cast = addrspacecast ptr inttoptr (i64 -1 to ptr) to ptr addrspace(5)
+  store volatile i32 7, ptr addrspace(5) %cast
   ret void
 }
 
@@ -320,24 +309,24 @@
 ; HSA-LABEL: {{^}}branch_use_flat_i32:
 ; HSA: {{flat|global}}_store_dword {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}
 ; HSA: s_endpgm
-define amdgpu_kernel void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
+define amdgpu_kernel void @branch_use_flat_i32(ptr addrspace(1) noalias %out, ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 %x, i32 %c) #0 {
 entry:
   %cmp = icmp ne i32 %c, 0
   br i1 %cmp, label %local, label %global
 
 local:
-  %flat_local = addrspacecast i32 addrspace(3)* %lptr to i32*
+  %flat_local = addrspacecast ptr addrspace(3) %lptr to ptr
   br label %end
 
 global:
-  %flat_global = addrspacecast i32 addrspace(1)* %gptr to i32*
+  %flat_global = addrspacecast ptr addrspace(1) %gptr to ptr
   br label %end
 
 end:
-  %fptr = phi i32* [ %flat_local, %local ], [ %flat_global, %global ]
-  store volatile i32 %x, i32* %fptr, align 4
-;  %val = load i32, i32* %fptr, align 4
-;  store i32 %val, i32 addrspace(1)* %out, align 4
+  %fptr = phi ptr [ %flat_local, %local ], [ %flat_global, %global ]
+  store volatile i32 %x, ptr %fptr, align 4
+;  %val = load i32, ptr %fptr, align 4
+;  store i32 %val, ptr addrspace(1) %out, align 4
   ret void
 }
 
@@ -353,16 +342,16 @@
 ; HSA: {{flat|global}}_store_dword
 ; HSA: s_barrier
 ; HSA: {{flat|global}}_load_dword
-define amdgpu_kernel void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
+define amdgpu_kernel void @store_flat_scratch(ptr addrspace(1) noalias %out, i32) #0 {
   %alloca = alloca i32, i32 9, align 4, addrspace(5)
   %x = call i32 @llvm.amdgcn.workitem.id.x() #2
-  %pptr = getelementptr i32, i32 addrspace(5)* %alloca, i32 %x
-  %fptr = addrspacecast i32 addrspace(5)* %pptr to i32*
-  store volatile i32 %x, i32* %fptr
+  %pptr = getelementptr i32, ptr addrspace(5) %alloca, i32 %x
+  %fptr = addrspacecast ptr addrspace(5) %pptr to ptr
+  store volatile i32 %x, ptr %fptr
   ; Dummy call
   call void @llvm.amdgcn.s.barrier() #1
-  %reload = load volatile i32, i32* %fptr, align 4
-  store volatile i32 %reload, i32 addrspace(1)* %out, align 4
+  %reload = load volatile i32, ptr %fptr, align 4
+  store volatile i32 %reload, ptr addrspace(1) %out, align 4
   ret void
 }
 
@@ -373,12 +362,11 @@
 ; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
 ; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
 ; GFX9: s_load_dword s{{[0-9]+}}, s[[[PTR_LO]]:[[PTR_HI]]], 0x0{{$}}
-define amdgpu_kernel void @use_constant_to_constant32_addrspacecast(i8 addrspace(4)* addrspace(4)* %ptr.ptr, i32 %offset) #0 {
-  %ptr = load volatile i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %ptr.ptr
-  %addrspacecast = addrspacecast i8 addrspace(4)* %ptr to i8 addrspace(6)*
-  %gep = getelementptr i8, i8 addrspace(6)* %addrspacecast, i32 %offset
-  %ptr.cast = bitcast i8 addrspace(6)* %gep to i32 addrspace(6)*
-  %load = load volatile i32, i32 addrspace(6)* %ptr.cast, align 4
+define amdgpu_kernel void @use_constant_to_constant32_addrspacecast(ptr addrspace(4) %ptr.ptr, i32 %offset) #0 {
+  %ptr = load volatile ptr addrspace(4), ptr addrspace(4) %ptr.ptr
+  %addrspacecast = addrspacecast ptr addrspace(4) %ptr to ptr addrspace(6)
+  %gep = getelementptr i8, ptr addrspace(6) %addrspacecast, i32 %offset
+  %load = load volatile i32, ptr addrspace(6) %gep, align 4
   ret void
 }
 
@@ -389,12 +377,11 @@
 ; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
 ; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
 ; GFX9: s_load_dword s{{[0-9]+}}, s[[[PTR_LO]]:[[PTR_HI]]], 0x0{{$}}
-define amdgpu_kernel void @use_global_to_constant32_addrspacecast(i8 addrspace(1)* addrspace(4)* %ptr.ptr, i32 %offset) #0 {
-  %ptr = load volatile i8 addrspace(1)*, i8 addrspace(1)* addrspace(4)* %ptr.ptr
-  %addrspacecast = addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(6)*
-  %gep = getelementptr i8, i8 addrspace(6)* %addrspacecast, i32 %offset
-  %ptr.cast = bitcast i8 addrspace(6)* %gep to i32 addrspace(6)*
-  %load = load volatile i32, i32 addrspace(6)* %ptr.cast, align 4
+define amdgpu_kernel void @use_global_to_constant32_addrspacecast(ptr addrspace(4) %ptr.ptr, i32 %offset) #0 {
+  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) %ptr.ptr
+  %addrspacecast = addrspacecast ptr addrspace(1) %ptr to ptr addrspace(6)
+  %gep = getelementptr i8, ptr addrspace(6) %addrspacecast, i32 %offset
+  %load = load volatile i32, ptr addrspace(6) %gep, align 4
   ret void
 }
 
@@ -403,9 +390,9 @@
 ; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0
 ; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], [[PTR]]
 ; GCN: flat_load_dword v{{[0-9]+}}, v[[[LO]]:[[HI]]]
-define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_0(i32 addrspace(6)* %ptr) #0 {
-  %stof = addrspacecast i32 addrspace(6)* %ptr to i32*
-  %load = load volatile i32, i32* %stof
+define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_0(ptr addrspace(6) %ptr) #0 {
+  %stof = addrspacecast ptr addrspace(6) %ptr to ptr
+  %load = load volatile i32, ptr %stof
   ret void
 }
 
@@ -414,9 +401,9 @@
 ; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0xffff8000
 ; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], [[PTR]]
 ; GCN: flat_load_dword v{{[0-9]+}}, v[[[LO]]:[[HI]]]
-define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_1(i32 addrspace(6)* %ptr) #3 {
-  %stof = addrspacecast i32 addrspace(6)* %ptr to i32*
-  %load = load volatile i32, i32* %stof
+define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_1(ptr addrspace(6) %ptr) #3 {
+  %stof = addrspacecast ptr addrspace(6) %ptr to ptr
+  %load = load volatile i32, ptr %stof
   ret void
 }
 
@@ -427,3 +414,6 @@
 attributes #1 = { nounwind convergent }
 attributes #2 = { nounwind readnone }
 attributes #3 = { nounwind "amdgpu-32bit-address-high-bits"="0xffff8000" }
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 1, !"amdgpu_code_object_version", i32 200}
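A minimal sketch of the new test idiom, assembled from the pieces above (the function name is illustrative, not from the changeset): opaque-pointer IR plus the module flag is self-contained input for the updated RUN lines, where 200 encodes code object version 2 as the major version times 100.

; Sketch only: compile with the updated RUN lines above, e.g.
;   llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -verify-machineinstrs
define amdgpu_kernel void @sketch_group_to_flat(ptr addrspace(3) %ptr) {
  %stof = addrspacecast ptr addrspace(3) %ptr to ptr   ; group (LDS) to flat
  store volatile i32 7, ptr %stof                      ; checked as flat_store_dword
  ret void
}

; The code object version now comes from a module flag rather than the
; removed --amdhsa-code-object-version=2 option (200 = 2 * 100).
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdgpu_code_object_version", i32 200}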