@@ -2235,87 +2235,42 @@ impl super::Adapter {
             signal_semaphores: Default::default(),
         };

-        let mem_allocator = {
-            let limits = self.phd_capabilities.properties.limits;
-
-            // Note: the parameters here are not set in stone nor where they picked with
-            // strong confidence.
-            // `final_free_list_chunk` should be bigger than starting_free_list_chunk if
-            // we want the behavior of starting with smaller block sizes and using larger
-            // ones only after we observe that the small ones aren't enough, which I think
-            // is a good "I don't know what the workload is going to be like" approach.
-            //
-            // For reference, `VMA`, and `gpu_allocator` both start with 256 MB blocks
-            // (then VMA doubles the block size each time it needs a new block).
-            // At some point it would be good to experiment with real workloads
-            //
-            // TODO(#5925): The plan is to switch the Vulkan backend from `gpu_alloc` to
-            // `gpu_allocator` which has a different (simpler) set of configuration options.
-            //
-            // TODO: These parameters should take hardware capabilities into account.
-            let mb = 1024 * 1024;
-            let perf_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 128 * mb,
-                final_free_list_chunk: 512 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 32 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 128 * mb,
-            };
-            let mem_usage_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 8 * mb,
-                final_free_list_chunk: 64 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 8 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 16 * mb,
-            };
-            let config = match memory_hints {
-                wgt::MemoryHints::Performance => perf_cfg,
-                wgt::MemoryHints::MemoryUsage => mem_usage_cfg,
-                wgt::MemoryHints::Manual {
-                    suballocated_device_memory_block_size,
-                } => gpu_alloc::Config {
-                    starting_free_list_chunk: suballocated_device_memory_block_size.start,
-                    final_free_list_chunk: suballocated_device_memory_block_size.end,
-                    initial_buddy_dedicated_size: suballocated_device_memory_block_size.start,
-                    ..perf_cfg
-                },
-            };
-
-            let max_memory_allocation_size =
-                if let Some(maintenance_3) = self.phd_capabilities.maintenance_3 {
-                    maintenance_3.max_memory_allocation_size
-                } else {
-                    u64::MAX
-                };
-            let properties = gpu_alloc::DeviceProperties {
-                max_memory_allocation_count: limits.max_memory_allocation_count,
-                max_memory_allocation_size,
-                non_coherent_atom_size: limits.non_coherent_atom_size,
-                memory_types: memory_types
-                    .iter()
-                    .map(|memory_type| gpu_alloc::MemoryType {
-                        props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
-                            memory_type.property_flags.as_raw() as u8,
-                        ),
-                        heap: memory_type.heap_index,
-                    })
-                    .collect(),
-                memory_heaps: mem_properties
-                    .memory_heaps_as_slice()
-                    .iter()
-                    .map(|&memory_heap| gpu_alloc::MemoryHeap {
-                        size: memory_heap.size,
-                    })
-                    .collect(),
-                buffer_device_address: enabled_extensions
-                    .contains(&khr::buffer_device_address::NAME),
-            };
-            gpu_alloc::GpuAllocator::new(config, properties)
+        // TODO: the allocator's configuration should take hardware capability into
+        // account.
+        const MB: u64 = 1024 * 1024;
+        let allocation_sizes = match memory_hints {
+            wgt::MemoryHints::Performance => gpu_allocator::AllocationSizes::new(128 * MB, 64 * MB)
+                .with_max_device_memblock_size(256 * MB)
+                .with_max_host_memblock_size(128 * MB),
+            wgt::MemoryHints::MemoryUsage => gpu_allocator::AllocationSizes::new(8 * MB, 4 * MB)
+                .with_max_device_memblock_size(64 * MB)
+                .with_max_host_memblock_size(32 * MB),
+            wgt::MemoryHints::Manual {
+                suballocated_device_memory_block_size,
+            } => {
+                // TODO: Would it be useful to expose the host size in memory hints
+                // instead of always using half of the device size?
+                let device_size = suballocated_device_memory_block_size;
+                let host_size = device_size.start / 2..device_size.end / 2;
+
+                gpu_allocator::AllocationSizes::new(device_size.start, host_size.start)
+                    .with_max_device_memblock_size(device_size.end)
+                    .with_max_host_memblock_size(host_size.end)
+            }
         };
+
+        let buffer_device_address = enabled_extensions.contains(&khr::buffer_device_address::NAME);
+
+        let mem_allocator =
+            gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
+                instance: self.instance.raw.clone(),
+                device: shared.raw.clone(),
+                physical_device: self.raw,
+                debug_settings: Default::default(),
+                buffer_device_address,
+                allocation_sizes,
+            })?;
+
         let desc_allocator = gpu_descriptor::DescriptorAllocator::new(
             if let Some(di) = self.phd_capabilities.descriptor_indexing {
                 di.max_update_after_bind_descriptors_in_all_pools
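For context on the `Manual` arm above, here is a standalone sketch of the same mapping from a `suballocated_device_memory_block_size` range to `gpu_allocator` block sizes. It assumes a `gpu_allocator` version that exposes the `AllocationSizes::new` / `with_max_*_memblock_size` builders used in the diff; the helper name and the 4..64 MB range are illustrative, not values taken from this change.

```rust
// Sketch only: mirrors the halving rule from the diff above.
use std::ops::Range;

const MB: u64 = 1024 * 1024;

/// Hypothetical helper illustrating the `MemoryHints::Manual` mapping:
/// device memblocks span the requested range, host memblocks half of it.
fn allocation_sizes_for_manual(
    suballocated_device_memory_block_size: Range<u64>,
) -> gpu_allocator::AllocationSizes {
    let device_size = suballocated_device_memory_block_size;
    let host_size = device_size.start / 2..device_size.end / 2;

    gpu_allocator::AllocationSizes::new(device_size.start, host_size.start)
        .with_max_device_memblock_size(device_size.end)
        .with_max_host_memblock_size(host_size.end)
}

fn main() {
    // Example: device memblocks grow from 4 MB up to 64 MB,
    // host memblocks from 2 MB up to 32 MB (modulo any clamping
    // gpu_allocator applies internally).
    let _sizes = allocation_sizes_for_manual(4 * MB..64 * MB);
}
```

The `Performance` and `MemoryUsage` arms are simply fixed presets built with the same calls.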