      Ceph Cluster Performance Benchmarking (Stress Testing)

                                                    Author: Yin Zhengjie

      Copyright notice: This is an original work. Reproduction is not permitted; violators will be held legally responsible.

      I. Benchmarking Ceph performance with the rados bench tool

      1. Overview of rados bench

      rados bench is Ceph's built-in benchmarking tool; it is used mainly to evaluate the write, sequential-read, and random-read performance of a RADOS storage pool.
      
      Reference:
      	https://www.ibm.com/docs/zh/storage-ceph/7.0.0?topic=benchmark-benchmarking-ceph-performance
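
      The general shape of the command is: rados bench -p <pool> <seconds> <write|seq|rand>. A minimal sketch of a full write-then-read cycle (the pool name and option values here are illustrative, not recommendations):

      # Write for 60s with 4 MiB objects and 16 concurrent ops; --no-cleanup keeps
      # the benchmark objects around so the read tests below have something to read:
      rados bench -p <pool> 60 write -b 4M -t 16 --no-cleanup
      # The sequential and random read modes reuse the objects left by --no-cleanup:
      rados bench -p <pool> 60 seq -t 16
      rados bench -p <pool> 60 rand -t 16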
      

      2. Prepare the environment

      [root@ceph141 ~]# ceph osd pool create JasonYin2020
      pool 'JasonYin2020' created
      [root@ceph141 ~]# 
      

      3. Write performance test

      [root@ceph141 ~]# rados bench -p JasonYin2020 30 write -b 4M -t 16 --no-cleanup
      hints = 1
      Maintaining 16 concurrent writes of 4194304 bytes to objects of size 4194304 for up to 30 seconds or 0 objects
      Object prefix: benchmark_data_ceph141_76435
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
          0      16        16         0         0         0           -           0
          1      16        35        19   75.9116        76   0.0875339     0.45294
          2      16        77        61   121.906       168    0.327283    0.455255
          3      16       117       101   134.586       160    0.594012    0.436621
          4      16       158       142   141.922       164    0.332936    0.411539
          5      16       207       191   152.729       196    0.730204     0.40622
          6      16       255       239   159.268       192    0.234069       0.387
          7      16       302       286   163.366       188     0.70147    0.379653
          8      16       346       330   164.942       176    0.625414     0.37525
          9      16       388       372   165.278       168    0.187506    0.377352
         10      16       435       419   167.543       188    0.265819    0.374599
         11      16       477       461    167.58       168    0.193465    0.374191
         12      16       525       509   169.611       192    0.252746    0.369712
         13      16       567       551   169.454       168    0.707838     0.37072
         14      16       605       589   168.178       152    0.180226    0.373712
         15      16       640       624   166.293       140    0.278596    0.375678
         16      16       680       664   165.895       160    0.433091    0.377307
         17      16       728       712   167.427       192    0.267873    0.378908
         18      16       769       753   167.143       164     0.26837    0.378465
         19      16       809       793   166.764       160     0.25623    0.378377
      2025-09-17T14:42:35.950863+0800 min lat: 0.0330067 max lat: 1.37957 avg lat: 0.378676
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
         20      16       848       832   166.223       156     0.35235    0.378676
         21      16       891       875   166.495       172    0.152146    0.379543
         22      16       930       914   166.017       156    0.144854    0.379113
         23      16       970       954   165.754       160    0.250361    0.380221
         24      16      1012       996   165.845       168    0.736403     0.38271
         25      16      1052      1036    165.61       160     0.29337    0.382724
         26      16      1091      1075   165.238       156    0.688238    0.383449
         27      16      1138      1122   166.076       188    0.565968    0.382761
         28      16      1188      1172   167.283       200    0.288764    0.380345
         29      16      1229      1213   167.166       164    0.303851    0.380236
         30      16      1271      1255   167.191       168    0.464374      0.3793
      Total time run:         30.1847   # total run time in seconds
      Total writes made:      1271      # number of writes completed
      Write size:             4194304   # size of each write in bytes (4 MB here; change it with the -b option)
      Object size:            4194304   # object size in bytes; follows the -b option above (4 MB)
      Bandwidth (MB/sec):     168.43    # bandwidth (MB/s)	*****
      Stddev Bandwidth:       22.8237   # bandwidth standard deviation
      Max bandwidth (MB/sec): 200       # maximum bandwidth
      Min bandwidth (MB/sec): 76        # minimum bandwidth
      Average IOPS:           42        # average IOPS (I/O operations per second)	*****
      Stddev IOPS:            5.70592   # IOPS standard deviation
      Max IOPS:               50        # maximum IOPS
      Min IOPS:               19        # minimum IOPS
      Average Latency(s):     0.379276  # average latency in seconds		*****
      Stddev Latency(s):      0.223052  # latency standard deviation in seconds
      Max latency(s):         1.37957   # maximum latency in seconds
      Min latency(s):         0.0330067 # minimum latency in seconds
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rados df  | egrep "JasonYin2020|POOL_NAME"
      POOL_NAME                      USED  OBJECTS  CLONES  COPIES  MISSING_ON_PRIMARY  UNFOUND  DEGRADED  RD_OPS       RD  WR_OPS       WR  USED COMPR  UNDER COMPR
      JasonYin2020                 15 GiB     1272       0    3816                   0        0         0       0      0 B    1272  5.0 GiB         0 B          0 B
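      # Note: USED (15 GiB) is WR (5.0 GiB) x 3, and COPIES (3816) = OBJECTS (1272) x 3,
      # so this pool is presumably using the default replication size of 3.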
      [root@ceph141 ~]# 
      [root@ceph141 ~]# echo 4194304/1024/1024|bc  # verifying that the 4194304-byte write size above is 4 MB
      4
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rados bench -p JasonYin2020 30 write -b 8M -t 16 --no-cleanup
      hints = 1
      Maintaining 16 concurrent writes of 8388608 bytes to objects of size 8388608 for up to 30 seconds or 0 objects
      Object prefix: benchmark_data_ceph141_90192
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
          0       0         0         0         0         0           -           0
          1      15        32        17   135.899       136    0.484957    0.579675
          2      15        55        40   159.907       184    0.276308    0.599671
          3      15        79        64   170.485       192     1.06627    0.673351
          4      15       102        87   173.853       184    0.261028    0.629182
          5      15       129       114    182.26       216    0.321719    0.646102
          6      15       155       140   186.542       208     1.42941    0.636997
          7      15       183       168   191.841       224     1.30052    0.617982
          8      15       212       197   196.849       232    0.538708    0.617369
          9      15       241       226   200.744       232    0.464318    0.608606
         10      15       269       254   203.058       224    0.700391    0.609036
         11      15       297       282   204.956       224    0.754455    0.604194
         12      15       326       311    207.19       232    0.443682    0.597935
         13      15       352       337   207.239       208    0.951104    0.597437
         14      15       382       367   209.575       240    0.520775    0.594501
         15      15       408       393   209.459       208    0.265721    0.593673
         16      15       434       419   209.365       208    0.382622    0.594403
         17      15       459       444   208.787       200    0.556966    0.597566
         18      15       478       463   205.631       152    0.396002    0.605588
         19      15       501       486   204.453       184    0.900571    0.609092
      2025-09-17T16:05:46.783097+0800 min lat: 0.14777 max lat: 1.86189 avg lat: 0.605509
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
         20      15       532       517   206.626       248    0.758761    0.605509
         21      15       560       545    207.45       224     0.43028    0.607474
         22      15       589       574   208.564       232    0.515777    0.603638
         23      15       614       599   208.165       200    0.723795     0.60354
         24      15       643       628   209.154       232    0.625574    0.602199
         25      15       668       653   208.786       200    0.623777    0.600819
         26      15       695       680   209.058       216    0.288917    0.600689
         27      15       725       710     210.2       240    0.520903    0.601474
         28      15       748       733   209.264       184    0.736457    0.600513
         29      15       778       763   210.322       240     0.42654    0.599972
         30      12       805       793   211.306       240    0.649407    0.600597
      Total time run:         30.3276
      Total writes made:      805
      Write size:             8388608
      Object size:            8388608
      Bandwidth (MB/sec):     212.347
      Stddev Bandwidth:       26.5482
      Max bandwidth (MB/sec): 248
      Min bandwidth (MB/sec): 136
      Average IOPS:           26
      Stddev IOPS:            3.31853
      Max IOPS:               31
      Min IOPS:               17
      Average Latency(s):     0.598054
      Stddev Latency(s):      0.252862
      Max latency(s):         1.86189
      Min latency(s):         0.14777
      [root@ceph141 ~]# 
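
      To drive the pool from several clients at once, rados bench also accepts a --run-name label so that concurrent runs do not collide on object names (a hedged sketch; the client labels are illustrative):

      # On client A:
      rados bench -p JasonYin2020 30 write -t 16 --run-name clientA --no-cleanup
      # On client B, started at the same time:
      rados bench -p JasonYin2020 30 write -t 16 --run-name clientB --no-cleanup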
      

      4. Sequential read test

      [root@ceph141 ~]# rados bench -p JasonYin2020 10 seq  -t 16
      hints = 1
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
          0       0         0         0         0         0           -           0
          1      16       148       132   527.852       528   0.0209347   0.0963214
          2      16       286       270   539.841       552    0.466894    0.108418
          3      16       408       392   521.602       488   0.0701871    0.114623
          4      16       540       524    523.18       528   0.0244368     0.11458
          5      16       665       649    518.47       500    0.111208    0.118413
          6      16       796       780    519.38       524   0.0285209     0.11887
          7      16       911       895   510.895       460    0.368056    0.121244
          8      16      1033      1017   508.012       488     0.47419     0.12218
          9      16      1144      1128   500.649       444    0.178578    0.124281
         10      11      1268      1257   502.165       516   0.0188324    0.124061
      Total time run:       10.1118
      Total reads made:     1268
      Read size:            4194304
      Object size:          4194304
      Bandwidth (MB/sec):   501.591  # Note that this cluster's sequential-read throughput is considerably higher than its write throughput.
      Average IOPS:         125
      Stddev IOPS:          8.35397
      Max IOPS:             138
      Min IOPS:             111
      Average Latency(s):   0.125749
      Max latency(s):       0.659622
      Min latency(s):       0.0059344
      [root@ceph141 ~]# 
      [root@ceph141 ~]# echo 8388608/1024/1024|bc  # verifying that the 8388608-byte size from the 8M write test above is 8 MB
      8
      [root@ceph141 ~]# 
      

      5. Random read test

      [root@ceph141 ~]# echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo sync  # Drop the page cache before the random-read test.
      3
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rados bench -p JasonYin2020 10 rand  -t 16
      hints = 1
        sec Cur ops   started  finished  avg MB/s  cur MB/s last lat(s)  avg lat(s)
          0       0         0         0         0         0           -           0
          1      16       135       119   475.875       476    0.154917    0.109734
          2      16       283       267   533.335       592   0.0805749    0.110209
          3      16       423       407   542.194       560   0.0498927    0.112117
          4      16       557       541   540.616       536   0.0656441    0.112463
          5      16       685       669   534.851       512   0.0896034    0.113753
          6      16       799       783   521.596       456   0.0356925    0.118029
          7      16       925       909   519.074       504    0.014017    0.119841
          8      16      1037      1021   510.086       448   0.0280757    0.121386
          9      16      1153      1137   504.938       464     0.13386    0.123689
         10      13      1290      1277   510.415       560  0.00783718    0.122106
      Total time run:       10.1043
      Total reads made:     1290
      Read size:            4194304
      Object size:          4194304
      Bandwidth (MB/sec):   510.675  # Random-read performance is also quite good.
      Average IOPS:         127
      Stddev IOPS:          12.4904
      Max IOPS:             148
      Min IOPS:             112
      Average Latency(s):   0.123349
      Max latency(s):       0.648618
      Min latency(s):       0.00488359
      [root@ceph141 ~]# 
      

      6. Clean up the test data

      [root@ceph141 ~]# rados df  | egrep "JasonYin2020|POOL_NAME"
      POOL_NAME                      USED  OBJECTS  CLONES  COPIES  MISSING_ON_PRIMARY  UNFOUND  DEGRADED  RD_OPS       RD  WR_OPS       WR  USED COMPR  UNDER COMPR
      JasonYin2020                 15 GiB     1272       0    3816                   0        0         0    5398   21 GiB    1272  5.0 GiB         0 B          0 B
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rados -p JasonYin2020 cleanup
      Removed 1271 objects
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rados df  | egrep "JasonYin2020|POOL_NAME"
      POOL_NAME                      USED  OBJECTS  CLONES  COPIES  MISSING_ON_PRIMARY  UNFOUND  DEGRADED  RD_OPS       RD  WR_OPS       WR  USED COMPR  UNDER COMPR
      JasonYin2020                    0 B        0       0       0                   0        0         0    5399   21 GiB    2544  5.0 GiB         0 B          0 B
      [root@ceph141 ~]# 
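
      If a run was interrupted and cleanup cannot find its run metadata, the benchmark objects can still be removed by prefix: rados cleanup accepts a --prefix option, and the prefix is the "Object prefix" line that rados bench printed at the start of the run (a hedged sketch):

      rados -p JasonYin2020 cleanup --prefix benchmark_data_ceph141_76435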
      

      II. Benchmarking Ceph block performance with the rbd bench tool

      1. Benchmarking Ceph block performance

      Ceph includes the rbd bench-write command (in recent releases, rbd bench --io-type write) for testing sequential writes to a block device and measuring throughput and latency.
      
      The default I/O size is 4096 bytes, the default number of I/O threads is 16, and the default total amount written is 1 GB. These defaults can be changed with the --io-size, --io-threads, and --io-total options, respectively.
      
      
      Reference:
      	https://www.ibm.com/docs/zh/storage-ceph/7.0.0?topic=benchmark-benchmarking-ceph-block-performance
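
      A minimal sketch of overriding those defaults (the values are illustrative, not recommendations):

      # 8 KiB I/Os, 8 threads, 2 GiB total instead of the 4096/16/1G defaults:
      rbd bench --io-type write --io-size 8192 --io-threads 8 --io-total 2G JasonYin2020/linux99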
      

      2. Create a test block device

      [root@ceph141 ~]# rbd ls -l -p JasonYin2020
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rbd create --size 10G JasonYin2020/linux99
      [root@ceph141 ~]# 
      [root@ceph141 ~]# rbd ls -l -p JasonYin2020
      NAME     SIZE    PARENT  FMT  PROT  LOCK
      linux99  10 GiB            2            
      [root@ceph141 ~]# 
      

      3. Block device write performance test

      [root@ceph141 ~]# rbd bench --io-type write linux99 --pool=JasonYin2020
      bench  type write io_size 4096 io_threads 16 bytes 1073741824 pattern sequential
        SEC       OPS   OPS/SEC   BYTES/SEC
          1     27168   27293.2   107 MiB/s
          2     58000   29066.1   114 MiB/s
          3     88448   29527.4   115 MiB/s
          4    118624   29689.7   116 MiB/s
          5    148672   29737.6   116 MiB/s
          6    175168     29600   116 MiB/s
          7    206304   29660.8   116 MiB/s
          8    238144   29939.2   117 MiB/s
      elapsed: 8   ops: 262144   ops/sec: 29993.6   bytes/sec: 117 MiB/s
      [root@ceph141 ~]# 
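
      rbd bench writes sequentially by default; it also accepts --io-pattern rand, which is worth running separately, since random I/O usually behaves quite differently (a hedged sketch):

      rbd bench --io-type write --io-pattern rand linux99 --pool=JasonYin2020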
      

      4. Block device read performance test

      [root@ceph141 ~]# rbd bench --io-type read linux99 --pool=JasonYin2020
      bench  type read io_size 4096 io_threads 16 bytes 1073741824 pattern sequential
        SEC       OPS   OPS/SEC   BYTES/SEC
          1      8672   8722.88    34 MiB/s
          2     16896   8472.94    33 MiB/s
          3     24944    8331.1    33 MiB/s
          4     32992   8260.25    32 MiB/s
          5     39904   7990.38    31 MiB/s
          6     46144   7494.39    29 MiB/s
          7     52896   7199.99    28 MiB/s
          8     61200   7251.19    28 MiB/s
          9     69520   7305.59    29 MiB/s
         10     77664   7551.99    29 MiB/s
         11     85904   7951.99    31 MiB/s
         12     94144   8249.59    32 MiB/s
         13    101888   8137.59    32 MiB/s
         14    109600   8015.99    31 MiB/s
         15    116784   7823.99    31 MiB/s
         16    124096   7638.39    30 MiB/s
         17    131632   7497.59    29 MiB/s
         18    138528   7327.99    29 MiB/s
         19    144784   7036.79    27 MiB/s
         20    151936   7030.39    27 MiB/s
         21    159344   7049.59    28 MiB/s
         22    167744   7222.39    28 MiB/s
         23    177200   7734.39    30 MiB/s
         24    184656   7974.39    31 MiB/s
         25    191344   7881.59    31 MiB/s
         26    198896   7910.39    31 MiB/s
         27    206736   7798.39    30 MiB/s
         28    215616   7683.19    30 MiB/s
         29    224176   7903.99    31 MiB/s
         30    233600   8451.19    33 MiB/s
         31    242512   8723.19    34 MiB/s
         32    251344   8921.59    35 MiB/s
         33    258992   8675.19    34 MiB/s
      elapsed: 33   ops: 262144   ops/sec: 7847.67   bytes/sec: 31 MiB/s
      [root@ceph141 ~]# 
      

      III. Benchmarking CephFS with the FIO tool [an scp-based test is recommended as a cross-check; the fio results here were found to need further study]

      1. Overview of FIO

      The FIO tool is used to benchmark Ceph File System (CephFS) performance.
      
      Reference:
      	https://www.ibm.com/docs/zh/storage-ceph/7.0.0?topic=benchmark-benchmarking-cephfs-performance
      

      2. Install fio

      [root@ceph143 ~]# apt -y install fio
      

      3. Mount CephFS

      	1. Create a user on the server side
      [root@ceph141 ~]# ceph auth add  client.cephfs mon 'allow r' mds 'allow rw' osd 'allow rwx'
      [root@ceph141 ~]# 
      [root@ceph141 ~]# ceph auth get client.cephfs
      [client.cephfs]
      	key = AQAu3shoEApwHBAAtzhU7vNjtormRMyAR7wINw==
      	caps mds = "allow rw"
      	caps mon = "allow r"
      	caps osd = "allow rwx"
      [root@ceph141 ~]# 
      
      
      	2. Mount from the client
      [root@ceph143 ~]# mkdir /data
      [root@ceph143 ~]# mount -t ceph 10.0.0.141:6789,10.0.0.142:6789,10.0.0.143:6789:/ /data -o name=cephfs,secret=AQAu3shoEApwHBAAtzhU7vNjtormRMyAR7wINw==
      [root@ceph143 ~]# 
      [root@ceph143 ~]# df -h | grep data
      10.0.0.141:6789,10.0.0.142:6789,10.0.0.143:6789:/  1.7T  280M  1.7T   1% /data
      [root@ceph143 ~]# 
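
      In passing: putting the key on the command line leaks it into the shell history. The kernel client also accepts a secretfile mount option, which reads the key from a file instead (a hedged sketch; the file path is illustrative):

      [root@ceph143 ~]# echo "AQAu3shoEApwHBAAtzhU7vNjtormRMyAR7wINw==" > /etc/ceph/cephfs.secret
      [root@ceph143 ~]# mount -t ceph 10.0.0.141:6789,10.0.0.142:6789,10.0.0.143:6789:/ /data -o name=cephfs,secretfile=/etc/ceph/cephfs.secret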
      

      4. Run the FIO test commands

      Start with bs=4k and repeat in powers of two (4k, 8k, 16k, 32k ... 128k ... 512k, 1M, 2M, 4M), combined with different iodepth settings.
      	
      You can also run the test at the operation size of your expected workload. For example, a 4K test with several iodepth values (a scripted version of this sweep follows the commands below):
      	
      Test commands:
      fio --name=randwrite --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=1 --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite1 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=32  --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite2 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=64  --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite3 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=128  --size=5G --runtime=60 --group_reporting=1
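      
      Rather than typing out each depth by hand, the sweep can be scripted; a minimal bash sketch (the job-name prefix and the depth list are illustrative):
      
      for depth in 1 32 64 128; do
          fio --name=randwrite-qd${depth} --rw=randwrite --direct=1 \
              --ioengine=libaio --bs=4k --iodepth=${depth} \
              --size=5G --runtime=60 --group_reporting=1
      done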
      
      
      Hands-on run: 
      [root@ceph143 ~]# cd /data/
      [root@ceph143 data]# 
      [root@ceph143 data]# ll
      total 4
      drwxr-xr-x  5 root root    3 Sep 16 15:21 ./
      drwxr-xr-x 22 root root 4096 Sep 17 11:51 ../
      drwxr-xr-x  2 root root    2 Sep 16 14:56 haha/
      drwxr-xr-x  3 root root    1 Sep 16 15:21 linux99/
      drwxr-xr-x  2 root root    2 Sep 16 14:57 xixi/
      [root@ceph143 data]# 
      [root@ceph143 data]# fio --name=randwrite --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=1 --size=5G --runtime=60 --group_reporting=1
      randwrite: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=1
      fio-3.28
      Starting 1 process
      randwrite: Laying out IO file (1 file / 5120MiB)
      Jobs: 1 (f=1): [w(1)][100.0%][w=1160KiB/s][w=290 IOPS][eta 00m:00s]
      randwrite: (groupid=0, jobs=1): err= 0: pid=38287: Wed Sep 17 11:54:55 2025
        write: IOPS=294, BW=1177KiB/s (1205kB/s)(69.0MiB/60002msec); 0 zone resets
          slat (usec): min=11, max=7400, avg=24.96, stdev=61.94
          clat (usec): min=549, max=405146, avg=3368.37, stdev=3833.47
           lat (usec): min=1007, max=405166, avg=3393.69, stdev=3834.47
          clat percentiles (usec):
           |  1.00th=[ 1156],  5.00th=[ 1270], 10.00th=[ 1336], 20.00th=[ 1467],
           | 30.00th=[ 1614], 40.00th=[ 1876], 50.00th=[ 2311], 60.00th=[ 3032],
           | 70.00th=[ 3949], 80.00th=[ 5342], 90.00th=[ 7177], 95.00th=[ 8225],
           | 99.00th=[10290], 99.50th=[11076], 99.90th=[13304], 99.95th=[13829],
           | 99.99th=[17171]
         bw (  KiB/s): min=  335, max= 1368, per=100.00%, avg=1178.35, stdev=103.63, samples=119
         iops        : min=   83, max=  342, avg=294.57, stdev=25.96, samples=119
        lat (usec)   : 750=0.01%, 1000=0.01%
        lat (msec)   : 2=43.43%, 4=27.23%, 10=28.09%, 20=1.22%, 500=0.01%
        cpu          : usr=0.56%, sys=0.61%, ctx=17718, majf=0, minf=11
        IO depths    : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           issued rwts: total=0,17653,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=1
      
      Run status group 0 (all jobs):
        WRITE: bw=1177KiB/s (1205kB/s), 1177KiB/s-1177KiB/s (1205kB/s-1205kB/s), io=69.0MiB (72.3MB), run=60002-60002msec
      [root@ceph143 data]# 
      [root@ceph143 data]#  fio --name=randwrite1 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=32  --size=5G --runtime=60 --group_reporting=1
      randwrite1: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=32
      fio-3.28
      Starting 1 process
      randwrite1: Laying out IO file (1 file / 5120MiB)
      Jobs: 1 (f=1): [w(1)][100.0%][w=8860KiB/s][w=2215 IOPS][eta 00m:00s]
      randwrite1: (groupid=0, jobs=1): err= 0: pid=38302: Wed Sep 17 11:55:55 2025
        write: IOPS=2047, BW=8189KiB/s (8386kB/s)(480MiB/60008msec); 0 zone resets
          slat (usec): min=2, max=15080, avg=18.51, stdev=57.92
          clat (usec): min=1142, max=114219, avg=15607.09, stdev=9487.44
           lat (usec): min=1166, max=114249, avg=15625.90, stdev=9488.41
          clat percentiles (usec):
           |  1.00th=[ 2245],  5.00th=[ 3359], 10.00th=[ 4555], 20.00th=[ 7046],
           | 30.00th=[ 9372], 40.00th=[11731], 50.00th=[14222], 60.00th=[16712],
           | 70.00th=[19530], 80.00th=[23200], 90.00th=[28181], 95.00th=[32637],
           | 99.00th=[43254], 99.50th=[47973], 99.90th=[62653], 99.95th=[69731],
           | 99.99th=[83362]
         bw (  KiB/s): min= 5720, max= 9184, per=99.99%, avg=8188.44, stdev=686.83, samples=119
         iops        : min= 1430, max= 2296, avg=2047.13, stdev=171.73, samples=119
        lat (msec)   : 2=0.43%, 4=7.19%, 10=24.92%, 20=38.87%, 50=28.19%
        lat (msec)   : 100=0.40%, 250=0.01%
        cpu          : usr=1.07%, sys=4.55%, ctx=75674, majf=0, minf=9
        IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=100.0%, >=64=0.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.1%, 64=0.0%, >=64=0.0%
           issued rwts: total=0,122852,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=32
      
      Run status group 0 (all jobs):
        WRITE: bw=8189KiB/s (8386kB/s), 8189KiB/s-8189KiB/s (8386kB/s-8386kB/s), io=480MiB (503MB), run=60008-60008msec
      [root@ceph143 data]# 
      [root@ceph143 data]#  fio --name=randwrite2 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=64  --size=5G --runtime=60 --group_reporting=1
      randwrite2: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=64
      fio-3.28
      Starting 1 process
      randwrite2: Laying out IO file (1 file / 5120MiB)
      Jobs: 1 (f=1): [w(1)][100.0%][w=9876KiB/s][w=2469 IOPS][eta 00m:00s]
      randwrite2: (groupid=0, jobs=1): err= 0: pid=38320: Wed Sep 17 11:56:56 2025
        write: IOPS=2233, BW=8935KiB/s (9149kB/s)(524MiB/60007msec); 0 zone resets
          slat (usec): min=2, max=3319, avg=18.63, stdev=33.53
          clat (usec): min=1200, max=151011, avg=28626.99, stdev=18202.94
           lat (usec): min=1223, max=151030, avg=28645.92, stdev=18204.03
          clat percentiles (msec):
           |  1.00th=[    3],  5.00th=[    5], 10.00th=[    8], 20.00th=[   12],
           | 30.00th=[   17], 40.00th=[   22], 50.00th=[   27], 60.00th=[   32],
           | 70.00th=[   37], 80.00th=[   43], 90.00th=[   53], 95.00th=[   62],
           | 99.00th=[   83], 99.50th=[   92], 99.90th=[  114], 99.95th=[  121],
           | 99.99th=[  138]
         bw (  KiB/s): min= 5240, max=10824, per=99.92%, avg=8928.57, stdev=970.93, samples=119
         iops        : min= 1310, max= 2706, avg=2232.14, stdev=242.73, samples=119
        lat (msec)   : 2=0.16%, 4=3.37%, 10=12.22%, 20=21.15%, 50=50.95%
        lat (msec)   : 100=11.87%, 250=0.28%
        cpu          : usr=1.29%, sys=4.44%, ctx=71681, majf=0, minf=10
        IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=100.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.1%, >=64=0.0%
           issued rwts: total=0,134039,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=64
      
      Run status group 0 (all jobs):
        WRITE: bw=8935KiB/s (9149kB/s), 8935KiB/s-8935KiB/s (9149kB/s-9149kB/s), io=524MiB (549MB), run=60007-60007msec
      [root@ceph143 data]# 
      [root@ceph143 data]#  fio --name=randwrite3 --rw=randwrite --direct=1 --ioengine=libaio --bs=4k --iodepth=128  --size=5G --runtime=60 --group_reporting=1
      randwrite3: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=128
      fio-3.28
      Starting 1 process
      randwrite3: Laying out IO file (1 file / 5120MiB)
      Jobs: 1 (f=0): [f(1)][100.0%][w=9.89MiB/s][w=2532 IOPS][eta 00m:00s]
      randwrite3: (groupid=0, jobs=1): err= 0: pid=38533: Wed Sep 17 11:57:59 2025
        write: IOPS=2500, BW=9.77MiB/s (10.2MB/s)(586MiB/60015msec); 0 zone resets
          slat (usec): min=2, max=2945, avg=14.71, stdev=17.47
          clat (usec): min=1314, max=428679, avg=51171.65, stdev=36322.58
           lat (usec): min=1328, max=428683, avg=51186.60, stdev=36322.52
          clat percentiles (msec):
           |  1.00th=[    3],  5.00th=[    7], 10.00th=[   11], 20.00th=[   20],
           | 30.00th=[   28], 40.00th=[   37], 50.00th=[   46], 60.00th=[   56],
           | 70.00th=[   66], 80.00th=[   78], 90.00th=[   96], 95.00th=[  114],
           | 99.00th=[  163], 99.50th=[  197], 99.90th=[  288], 99.95th=[  334],
           | 99.99th=[  401]
         bw (  KiB/s): min= 3184, max=11840, per=100.00%, avg=10002.52, stdev=1354.82, samples=119
         iops        : min=  796, max= 2960, avg=2500.61, stdev=338.72, samples=119
        lat (msec)   : 2=0.10%, 4=2.10%, 10=7.19%, 20=11.34%, 50=33.50%
        lat (msec)   : 100=37.31%, 250=8.25%, 500=0.21%
        cpu          : usr=1.37%, sys=4.12%, ctx=65230, majf=0, minf=11
        IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=100.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
           issued rwts: total=0,150044,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=128
      
      Run status group 0 (all jobs):
        WRITE: bw=9.77MiB/s (10.2MB/s), 9.77MiB/s-9.77MiB/s (10.2MB/s-10.2MB/s), io=586MiB (615MB), run=60015-60015msec
      [root@ceph143 data]# 
      [root@ceph143 data]# ll -h
      total 20G
      drwxr-xr-x  5 root root    7 Sep 17 11:56 ./
      drwxr-xr-x 22 root root 4.0K Sep 17 11:51 ../
      drwxr-xr-x  2 root root    2 Sep 16 14:56 haha/
      drwxr-xr-x  3 root root    1 Sep 16 15:21 linux99/
      -rw-r--r--  1 root root 5.0G Sep 17 11:54 randwrite.0.0
      -rw-r--r--  1 root root 5.0G Sep 17 11:55 randwrite1.0.0
      -rw-r--r--  1 root root 5.0G Sep 17 11:56 randwrite2.0.0
      -rw-r--r--  1 root root 5.0G Sep 17 11:57 randwrite3.0.0
      drwxr-xr-x  2 root root    2 Sep 16 14:57 xixi/
      [root@ceph143 data]# 
      [root@ceph143 data]# 
      
       
      Field descriptions for the "Run status group 0 (all jobs)" summary:
      
      	WRITE:
      		The aggregate write statistics.
      		
      	bw:
      		The aggregate write bandwidth.
      		
      	io:
      		The total amount of data transferred.
      		
      	run:
      		The wall-clock run time.
      

      5. Other tests

      An 8K test with different iodepth values:
      	
      Commands:
      fio --name=randwrite0 --rw=randwrite --direct=1 --ioengine=libaio --bs=8k --iodepth=1  --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite11 --rw=randwrite --direct=1 --ioengine=libaio --bs=8k --iodepth=32 --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite22 --rw=randwrite --direct=1 --ioengine=libaio --bs=8k --iodepth=64  --size=5G --runtime=60 --group_reporting=1
      fio --name=randwrite33 --rw=randwrite --direct=1 --ioengine=libaio --bs=8k --iodepth=128  --size=5G --runtime=60 --group_reporting=1
       
       
       
       Key option descriptions:
      	--name: name of the job (fio also derives the test file name from it).
      	
      	--rw: I/O pattern; valid values include: read/randread/write/randwrite/rw/randrw/trim/randtrim/trimwrite.
       
      	--direct and --ioengine are usually set together; the available I/O engines include:
      		[root@ceph143 ~]# man fio 
      		...
      		   I/O engine
      			   ioengine=str
      					  Defines how the job issues I/O to the file. The following types are defined:
      
      							 sync   Basic read(2) or write(2) I/O. lseek(2) is used to position the I/O location.  See fsync and fdatasync for syncing write I/Os.
      
      							 psync  Basic pread(2) or pwrite(2) I/O. Default on all supported operating systems except for Windows.
      
      							 vsync  Basic readv(2) or writev(2) I/O. Will emulate queuing by coalescing adjacent I/Os into a single submission.
      
      							 pvsync Basic preadv(2) or pwritev(2) I/O.
      
      							 pvsync2
      									Basic preadv2(2) or pwritev2(2) I/O.
      
      							 libaio Linux native asynchronous I/O. Note that Linux may only support queued behavior with non-buffered I/O (set `direct=1' or `buffered=0').  This engine defines
      									engine specific options.
      
      							 ...
      
      	--bs:
      		Block size of a single read or write.
      		
      	
      	--iodepth:
      		I/O queue depth, i.e. the number of I/Os kept in flight; it only takes effect with asynchronous engines such as libaio.
      		
      	--size:
      		Total amount of data for the test.
      		
      	--runtime
      		How long to run, in seconds.
      		
      	--group_reporting
      		Report statistics per group instead of per job.
      		
      		
       
      For full documentation, consult the manual:
      [root@ceph143 ~]# man fio 
      
      
      	Bonus 1:
      [root@ceph143 data]# fio --name=yinzhengjie-write --rw=write --direct=1 --ioengine=libaio --bs=8k --iodepth=1  --size=10G --runtime=30 --group_reporting=1
      yinzhengjie-write: (g=0): rw=write, bs=(R) 8192B-8192B, (W) 8192B-8192B, (T) 8192B-8192B, ioengine=libaio, iodepth=1
      fio-3.28
      Starting 1 process
      yinzhengjie-write: Laying out IO file (1 file / 10240MiB)
      Jobs: 1 (f=1): [W(1)][100.0%][w=3139KiB/s][w=392 IOPS][eta 00m:00s]
      yinzhengjie-write: (groupid=0, jobs=1): err= 0: pid=78153: Wed Sep 17 15:44:54 2025
        write: IOPS=330, BW=2645KiB/s (2708kB/s)(77.5MiB/30001msec); 0 zone resets
          slat (usec): min=11, max=430, avg=26.14, stdev=12.04
          clat (usec): min=945, max=18347, avg=2992.92, stdev=1857.03
           lat (usec): min=957, max=18373, avg=3019.48, stdev=1859.18
          clat percentiles (usec):
           |  1.00th=[ 1123],  5.00th=[ 1303], 10.00th=[ 1467], 20.00th=[ 1811],
           | 30.00th=[ 2024], 40.00th=[ 2212], 50.00th=[ 2409], 60.00th=[ 2638],
           | 70.00th=[ 2999], 80.00th=[ 3654], 90.00th=[ 5407], 95.00th=[ 7439],
           | 99.00th=[ 9896], 99.50th=[10814], 99.90th=[13304], 99.95th=[14222],
           | 99.99th=[18220]
         bw (  KiB/s): min= 2208, max= 4096, per=100.00%, avg=2651.37, stdev=466.69, samples=59
         iops        : min=  276, max=  512, avg=331.39, stdev=58.35, samples=59
        lat (usec)   : 1000=0.03%
        lat (msec)   : 2=29.11%, 4=54.26%, 10=15.70%, 20=0.91%
        cpu          : usr=0.24%, sys=1.20%, ctx=9929, majf=0, minf=10
        IO depths    : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           issued rwts: total=0,9919,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=1
      
      Run status group 0 (all jobs):
        WRITE: bw=2645KiB/s (2708kB/s), 2645KiB/s-2645KiB/s (2708kB/s-2708kB/s), io=77.5MiB (81.3MB), run=30001-30001msec
      [root@ceph143 data]#
      [root@ceph143 data]# ll yinzhengjie-write.0.0 
      -rw-r--r-- 1 root root 81256448 Sep 17 15:44 yinzhengjie-write.0.0
      [root@ceph143 data]# 
      [root@ceph143 data]# ll -h yinzhengjie-write.0.0 
      -rw-r--r-- 1 root root 78M Sep 17 15:44 yinzhengjie-write.0.0
      [root@ceph143 data]# 
      
      
      
       	Bonus 2:
      [root@ceph143 data]# fio --name=yinzhengjie-write-read --rw=rw --direct=1 --ioengine=libaio --bs=8k --iodepth=1  --size=10G --runtime=30 --group_reporting=1
      yinzhengjie-write-read: (g=0): rw=rw, bs=(R) 8192B-8192B, (W) 8192B-8192B, (T) 8192B-8192B, ioengine=libaio, iodepth=1
      fio-3.28
      Starting 1 process
      yinzhengjie-write-read: Laying out IO file (1 file / 10240MiB)
      Jobs: 1 (f=0): [f(1)][100.0%][r=3217KiB/s,w=3241KiB/s][r=402,w=405 IOPS][eta 00m:00s]
      yinzhengjie-write-read: (groupid=0, jobs=1): err= 0: pid=78408: Wed Sep 17 15:48:36 2025
        read: IOPS=250, BW=2001KiB/s (2049kB/s)(58.6MiB/30003msec)
          slat (usec): min=7, max=257, avg=19.29, stdev=12.35
          clat (usec): min=97, max=10374, avg=958.61, stdev=1065.27
           lat (usec): min=106, max=10397, avg=978.22, stdev=1066.59
          clat percentiles (usec):
           |  1.00th=[  155],  5.00th=[  210], 10.00th=[  269], 20.00th=[  334],
           | 30.00th=[  383], 40.00th=[  424], 50.00th=[  461], 60.00th=[  519],
           | 70.00th=[  652], 80.00th=[ 1696], 90.00th=[ 2802], 95.00th=[ 3097],
           | 99.00th=[ 4555], 99.50th=[ 5276], 99.90th=[ 6521], 99.95th=[ 6849],
           | 99.99th=[10421]
         bw (  KiB/s): min= 1408, max= 3840, per=98.97%, avg=1980.27, stdev=544.85, samples=59
         iops        : min=  176, max=  480, avg=247.51, stdev=68.11, samples=59
        write: IOPS=251, BW=2014KiB/s (2063kB/s)(59.0MiB/30003msec); 0 zone resets
          slat (usec): min=9, max=671, avg=23.66, stdev=16.24
          clat (usec): min=964, max=18388, avg=2967.85, stdev=2052.60
           lat (usec): min=977, max=18420, avg=2991.87, stdev=2055.08
          clat percentiles (usec):
           |  1.00th=[ 1106],  5.00th=[ 1237], 10.00th=[ 1336], 20.00th=[ 1549],
           | 30.00th=[ 1778], 40.00th=[ 2008], 50.00th=[ 2245], 60.00th=[ 2507],
           | 70.00th=[ 2966], 80.00th=[ 3818], 90.00th=[ 6128], 95.00th=[ 7832],
           | 99.00th=[10159], 99.50th=[10814], 99.90th=[13566], 99.95th=[13960],
           | 99.99th=[18482]
         bw (  KiB/s): min= 1261, max= 3568, per=98.79%, avg=1990.32, stdev=525.10, samples=59
         iops        : min=  157, max=  446, avg=248.76, stdev=65.66, samples=59
        lat (usec)   : 100=0.01%, 250=3.80%, 500=24.78%, 750=7.86%, 1000=1.35%
        lat (msec)   : 2=22.84%, 4=29.17%, 10=9.64%, 20=0.55%
        cpu          : usr=0.31%, sys=1.44%, ctx=15394, majf=0, minf=14
        IO depths    : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           issued rwts: total=7503,7555,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=1
      
      Run status group 0 (all jobs):
         READ: bw=2001KiB/s (2049kB/s), 2001KiB/s-2001KiB/s (2049kB/s-2049kB/s), io=58.6MiB (61.5MB), run=30003-30003msec
        WRITE: bw=2014KiB/s (2063kB/s), 2014KiB/s-2014KiB/s (2063kB/s-2063kB/s), io=59.0MiB (61.9MB), run=30003-30003msec
      [root@ceph143 data]# 
      [root@ceph143 data]# fio --name=yinzhengjie-write-read --rw=rw --direct=1 --ioengine=libaio --bs=8k --iodepth=1  --size=10G --runtime=60 --group_reporting=1
      yinzhengjie-write-read: (g=0): rw=rw, bs=(R) 8192B-8192B, (W) 8192B-8192B, (T) 8192B-8192B, ioengine=libaio, iodepth=1
      fio-3.28
      Starting 1 process
      Jobs: 1 (f=1): [M(1)][100.0%][r=1705KiB/s,w=1913KiB/s][r=213,w=239 IOPS][eta 00m:00s]
      yinzhengjie-write-read: (groupid=0, jobs=1): err= 0: pid=78433: Wed Sep 17 15:50:37 2025
        read: IOPS=276, BW=2209KiB/s (2262kB/s)(129MiB/60002msec)
          slat (usec): min=6, max=579, avg=16.90, stdev= 9.90
          clat (usec): min=69, max=11063, avg=923.42, stdev=1122.67
           lat (usec): min=76, max=11079, avg=940.62, stdev=1124.67
          clat percentiles (usec):
           |  1.00th=[  126],  5.00th=[  176], 10.00th=[  206], 20.00th=[  265],
           | 30.00th=[  310], 40.00th=[  359], 50.00th=[  400], 60.00th=[  445],
           | 70.00th=[  553], 80.00th=[ 1778], 90.00th=[ 2769], 95.00th=[ 3032],
           | 99.00th=[ 5145], 99.50th=[ 5735], 99.90th=[ 6915], 99.95th=[ 7373],
           | 99.99th=[ 8455]
         bw (  KiB/s): min= 1248, max= 4400, per=100.00%, avg=2212.31, stdev=748.52, samples=119
         iops        : min=  156, max=  550, avg=276.49, stdev=93.59, samples=119
        write: IOPS=276, BW=2210KiB/s (2263kB/s)(130MiB/60002msec); 0 zone resets
          slat (usec): min=8, max=1266, avg=20.61, stdev=14.21
          clat (usec): min=866, max=19189, avg=2651.57, stdev=1893.19
           lat (usec): min=878, max=19219, avg=2672.51, stdev=1895.37
          clat percentiles (usec):
           |  1.00th=[ 1020],  5.00th=[ 1156], 10.00th=[ 1254], 20.00th=[ 1418],
           | 30.00th=[ 1614], 40.00th=[ 1827], 50.00th=[ 2024], 60.00th=[ 2278],
           | 70.00th=[ 2606], 80.00th=[ 3228], 90.00th=[ 5145], 95.00th=[ 7177],
           | 99.00th=[ 9765], 99.50th=[10814], 99.90th=[13173], 99.95th=[15926],
           | 99.99th=[18744]
         bw (  KiB/s): min= 1536, max= 4112, per=100.00%, avg=2214.48, stdev=719.96, samples=119
         iops        : min=  192, max=  514, avg=276.76, stdev=90.03, samples=119
        lat (usec)   : 100=0.24%, 250=8.24%, 500=25.02%, 750=3.41%, 1000=1.12%
        lat (msec)   : 2=27.07%, 4=26.78%, 10=7.72%, 20=0.40%
        cpu          : usr=0.68%, sys=1.05%, ctx=33273, majf=0, minf=17
        IO depths    : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
           submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
           issued rwts: total=16567,16576,0,0 short=0,0,0,0 dropped=0,0,0,0
           latency   : target=0, window=0, percentile=100.00%, depth=1
      
      Run status group 0 (all jobs):
         READ: bw=2209KiB/s (2262kB/s), 2209KiB/s-2209KiB/s (2262kB/s-2262kB/s), io=129MiB (136MB), run=60002-60002msec
        WRITE: bw=2210KiB/s (2263kB/s), 2210KiB/s-2210KiB/s (2263kB/s-2263kB/s), io=130MiB (136MB), run=60002-60002msec
      [root@ceph143 data]# 
      
      	
      	
      Postscript:
      	If you find fio awkward to work with, you can simply scp a large file into the CephFS mount path; the copy speed also gives a rough, indirect indication of CephFS performance, as shown below.
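      
      A hedged sketch of that approach (the host, file name, and mount point are taken from this environment and are illustrative):
      
      # Copy a large file from another node into the CephFS mount and time it:
      time scp root@10.0.0.141:/root/yinzhengjie-ceph-v19.tar.gz /data/
      # Or generate a local file and time a plain copy into the mount:
      dd if=/dev/urandom of=/tmp/bigfile bs=1M count=2048
      time cp /tmp/bigfile /data/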
      

      IV. Benchmarking Ceph Object Gateway performance

      1. Performance test of the object storage gateway

      The s3cmd tool is used to benchmark Ceph Object Gateway performance.
      
      Reference:
      	https://www.ibm.com/docs/zh/storage-ceph/7.0.0?topic=benchmark-benchmarking-ceph-object-gateway-performance
      	
      

      2. Upload a file and measure the speed

      The time command measures how long the upload takes.
      	
      [root@ceph141 ~]# s3cmd ls s3://yinzhengjie
      2025-09-17 02:35     39451064  s3://yinzhengjie/containerd
      2025-09-17 02:36     58246432  s3://yinzhengjie/dockerd
      [root@ceph141 ~]# 
      [root@ceph141 ~]# ll yinzhengjie-ceph-v19.tar.gz 
      -rw-r--r-- 1 root root 1333207552 May 16 09:38 yinzhengjie-ceph-v19.tar.gz
      [root@ceph141 ~]# 
      [root@ceph141 ~]# ll -h yinzhengjie-ceph-v19.tar.gz 
      -rw-r--r-- 1 root root 1.3G May 16 09:38 yinzhengjie-ceph-v19.tar.gz
      [root@ceph141 ~]# 
      [root@ceph141 ~]# time s3cmd put yinzhengjie-ceph-v19.tar.gz s3://yinzhengjie
      upload: 'yinzhengjie-ceph-v19.tar.gz' -> 's3://yinzhengjie/yinzhengjie-ceph-v19.tar.gz'  [part 1 of 85, 15MB] [1 of 1]
       15728640 of 15728640   100% in    0s    24.22 MB/s  done
      
      ...
      upload: 'yinzhengjie-ceph-v19.tar.gz' -> 's3://yinzhengjie/yinzhengjie-ceph-v19.tar.gz'  [part 85 of 85, 11MB] [1 of 1]
       12001792 of 12001792   100% in    0s    53.52 MB/s  done
      
      real	0m31.253s  # Total elapsed time for the command: just under 32 s.
      user	0m3.156s
      sys		0m10.206s
      [root@ceph141 ~]# 
      [root@ceph141 ~]# echo 1333207552/32/1024/1024|bc  # So RGW write throughput works out to roughly 39 MB/s.
      39
      [root@ceph141 ~]# 
      

      3. Download a file and measure the speed

      The time command measures how long the download takes.
      
      [root@ceph141 ~]# time s3cmd get s3://yinzhengjie/yinzhengjie-ceph-v19.tar.gz /tmp/
      download: 's3://yinzhengjie/yinzhengjie-ceph-v19.tar.gz' -> '/tmp/yinzhengjie-ceph-v19.tar.gz'  [1 of 1]
       1333207552 of 1333207552   100% in   11s   113.77 MB/s  done
      
      real	0m11.336s  # Took just under 12 s.
      user	0m1.425s
      sys		0m2.944s
      [root@ceph141 ~]# 
      [root@ceph141 ~]# ll /tmp/yinzhengjie-ceph-v19.tar.gz 
      -rw-r--r-- 1 root root 1333207552 Sep 17 08:45 /tmp/yinzhengjie-ceph-v19.tar.gz
      [root@ceph141 ~]# 
      [root@ceph141 ~]# echo 1333207552/12/1024/1024|bc  # Conclusion: RGW read throughput is roughly 105 MB/s.
      105
      [root@ceph141 ~]# 
      

      4. List all objects in a bucket and measure the response time

      [root@ceph141 ~]# time s3cmd ls s3://yinzhengjie/
      2025-09-17 02:35     39451064  s3://yinzhengjie/containerd
      2025-09-17 02:36     58246432  s3://yinzhengjie/dockerd
      2025-09-17 08:45   1333207552  s3://yinzhengjie/yinzhengjie-ceph-v19.tar.gz
      
      real	0m0.200s
      user	0m0.106s
      sys		0m0.026s
      [root@ceph141 ~]# 
      