
Hi everyone. We need to migrate all of our cloud environments to Proxmox, and I am currently evaluating and testing Proxmox + Ceph + OpenStack.

However, I have run into difficulties, mainly very poor Ceph write performance. My test environment is as follows:
Physical host:

CPU: Dual Intel® Xeon® E5-2698Bv3
Memory: 8 x 16G DDR3
Dual 1 Gbit NIC: Realtek Semiconductor Co., Ltd. RTL8111/8168/8411
Disk:
1 x 500G NVME SAMSUNG MZALQ512HALU-000L1 (also serves as the ssd-data thinpool in PVE)
1 x 500G SATA WDC_WD5000AZLX-60K2TA0 (physical host system disk)
2 x 500G SATA WDC_WD5000AZLX-60K2TA0
1 x 1T SATA ST1000LM035-1RK172
PVE: pve-manager/7.3-4/d69b70d4 (running kernel: 5.15.74-1-pve)
Network configuration:
enp4s0 (OVS Port) -> vmbr0 (OVS Bridge) -> br0mgmt (192.168.1.3/24, gw 192.168.1.1)
enp5s0 (OVS Port, MTU=9000) -> vmbr1 (OVS Bridge, MTU=9000)
vmbr2 (OVS Bridge, MTU=9000)
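For reference, this layout corresponds roughly to the following /etc/network/interfaces stanzas using Proxmox's Open vSwitch integration (a sketch reconstructed from the list above, not a copy of my actual file):

auto enp4s0
iface enp4s0 inet manual
    ovs_type OVSPort
    ovs_bridge vmbr0

auto vmbr0
iface vmbr0 inet manual
    ovs_type OVSBridge
    ovs_ports enp4s0 br0mgmt

auto br0mgmt
iface br0mgmt inet static
    address 192.168.1.3/24
    gateway 192.168.1.1
    ovs_type OVSIntPort
    ovs_bridge vmbr0

auto enp5s0
iface enp5s0 inet manual
    ovs_type OVSPort
    ovs_bridge vmbr1
    ovs_mtu 9000

auto vmbr1
iface vmbr1 inet manual
    ovs_type OVSBridge
    ovs_ports enp5s0
    ovs_mtu 9000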
Nested PVE node VM configuration (vm-101, i.e. pve01):

CPU: 32 (1 socket, 32 cores) [host]
Memory: 32G
Disk:
1 x local-lvm:vm-101-disk-0,iothread=1,size=32G
2 x ssd-data:vm-101-disk-0,iothread=1,size=120G
Network devices:
net0: bridge=vmbr0,firewall=1
net1: bridge=vmbr2,firewall=1,mtu=1 (Ceph cluster/public network)
net2: bridge=vmbr0,firewall=1
net3: bridge=vmbr0,firewall=1
Network configuration:
ens18 (net0, OVS Port) -> vmbr0 (OVS Bridge) -> br0mgmt (10.10.1.11/24, gw 10.10.1.1)
ens19 (net1, OVS Port, MTU=9000) -> vmbr1 (OVS Bridge, MTU=9000) -> br1ceph (192.168.10.1/24, MTU=9000)
ens20 (net2, Network Device, Active=No)
ens21 (net3, Network Device, Active=No)
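A NIC entry like net1 above can be added from the physical host with qm, e.g. (a sketch; for virtio NICs, mtu=1 tells Proxmox to inherit the bridge's MTU, i.e. 9000 here):

qm set 101 --net1 virtio,bridge=vmbr2,firewall=1,mtu=1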
For fio-cdm, running it without any arguments corresponds to the following fio job file (it can be dumped with python fio-cdm -f -):
[global]
ioengine=libaio
filename=.fio_testmark
directory=/root
size=1073741824.0
direct=1
runtime=5
refill_buffers
norandommap
randrepeat=0
allrandrepeat=0
group_reporting

[seq-read-1m-q8-t1]
rw=read
bs=1m
rwmixread=0
iodepth=8
numjobs=1
loops=5
stonewall

[seq-write-1m-q8-t1]
rw=write
bs=1m
rwmixread=0
iodepth=8
numjobs=1
loops=5
stonewall

[seq-read-1m-q1-t1]
rw=read
bs=1m
rwmixread=0
iodepth=1
numjobs=1
loops=5
stonewall

[seq-write-1m-q1-t1]
rw=write
bs=1m
rwmixread=0
iodepth=1
numjobs=1
loops=5
stonewall

[rnd-read-4k-q32-t16]
rw=randread
bs=4k
rwmixread=0
iodepth=32
numjobs=16
loops=5
stonewall

[rnd-write-4k-q32-t16]
rw=randwrite
bs=4k
rwmixread=0
iodepth=32
numjobs=16
loops=5
stonewall

[rnd-read-4k-q1-t1]
rw=randread
bs=4k
rwmixread=0
iodepth=1
numjobs=1
loops=5
stonewall

[rnd-write-4k-q1-t1]
rw=randwrite
bs=4k
rwmixread=0
iodepth=1
numjobs=1
loops=5
stonewall

# prepare tools
root@pve01:~# apt update -y && apt upgrade -y
root@pve01:~# apt install fio git -y
root@pve01:~# git clone https://github.com/xlucn/fio-cdm.git

# create test block
root@pve01:~# rbd create test -s 20G
root@pve01:~# rbd map test
root@pve01:~# mkfs.xfs /dev/rbd0
root@pve01:~# mkdir /mnt/test
root@pve01:/mnt# mount /dev/rbd0 /mnt/test

# start test
root@pve01:/mnt/test# python3 ~/fio-cdm/fio-cdm

root@pve01:~# apt install iperf3 -y
root@pve01:~# iperf3 -s
-----------------------------------------------------------
Server listening on 5201
-----------------------------------------------------------
Accepted connection from 10.10.1.12, port 52968
[ 5] local 10.10.1.11 port 5201 connected to 10.10.1.12 port 52972
[ ID] Interval Transfer Bitrate
[ 5] 0.00-1.00 sec 1.87 GBytes 16.0 Gbits/sec
[ 5] 1.00-2.00 sec 1.92 GBytes 16.5 Gbits/sec
[ 5] 2.00-3.00 sec 1.90 GBytes 16.4 Gbits/sec
[ 5] 3.00-4.00 sec 1.90 GBytes 16.3 Gbits/sec
[ 5] 4.00-5.00 sec 1.85 GBytes 15.9 Gbits/sec
[ 5] 5.00-6.00 sec 1.85 GBytes 15.9 Gbits/sec
[ 5] 6.00-7.00 sec 1.70 GBytes 14.6 Gbits/sec
[ 5] 7.00-8.00 sec 1.75 GBytes 15.0 Gbits/sec
[ 5] 8.00-9.00 sec 1.89 GBytes 16.2 Gbits/sec
[ 5] 9.00-10.00 sec 1.87 GBytes 16.0 Gbits/sec
[ 5] 10.00-10.04 sec 79.9 MBytes 15.9 Gbits/sec
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate
[ 5] 0.00-10.04 sec 18.6 GBytes 15.9 Gbits/sec receiver


root@pve01:~# ping -M do -s 8000 192.168.10.2
PING 192.168.10.2 (192.168.10.2) 8000(8028) bytes of data.
8008 bytes from 192.168.10.2: icmp_seq=1 ttl=64 time=1.51 ms
8008 bytes from 192.168.10.2: icmp_seq=2 ttl=64 time=0.500 ms
^C
--- 192.168.10.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 0.500/1.007/1.514/0.507 ms
root@pve01:~#

Benchmark results (Ceph and the system have not been tuned or bcache-accelerated):
Step (raw NVMe baseline on the physical host pve1):
root@pve1:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 465.8G 0 disk
├─sda1 8:1 0 1007K 0 part
├─sda2 8:2 0 512M 0 part /boot/efi
└─sda3 8:3 0 465.3G 0 part
 ├─pve-root 253:0 0 96G 0 lvm /
 ├─pve-data_tmeta 253:1 0 3.5G 0 lvm
 │ └─pve-data-tpool 253:3 0 346.2G 0 lvm
 │ ├─pve-data 253:4 0 346.2G 1 lvm
 │ └─pve-vm--100--disk--0 253:5 0 16G 0 lvm
 └─pve-data_tdata 253:2 0 346.2G 0 lvm
 └─pve-data-tpool 253:3 0 346.2G 0 lvm
 ├─pve-data 253:4 0 346.2G 1 lvm
 └─pve-vm--100--disk--0 253:5 0 16G 0 lvm
sdb 8:16 0 931.5G 0 disk
sdc 8:32 0 465.8G 0 disk
sdd 8:48 0 465.8G 0 disk
nvme0n1 259:0 0 476.9G 0 disk
root@pve1:~# mkfs.xfs /dev/nvme0n1 -f
root@pve1:~# mkdir /mnt/nvme
root@pve1:~# mount /dev/nvme0n1 /mnt/nvme
root@pve1:~# cd /mnt/nvme/

Result:
root@pve1:/mnt/nvme# python3 ~/fio-cdm/fio-cdm
tests: 5, size: 1.0GiB, target: /mnt/nvme 3.4GiB/476.7GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 2361.95| 1435.48|
|SEQ1M Q1 T1 | 1629.84| 1262.63|
|RND4K Q32T16| 954.86| 1078.88|
|. IOPS | 233119.53| 263398.08|
|. latency us| 2194.84| 1941.78|
|RND4K Q1 T1 | 55.56| 225.06|
|. IOPS | 13565.49| 54946.21|
|. latency us| 72.76| 16.97|

Next, set osd_pool_default_min_size and osd_pool_default_size to 1 in ceph.conf, then run systemctl restart ceph.target and fix any errors that come up.
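The ceph.conf fragment is just the following (a sketch; note these defaults only apply to pools created afterwards, which is why the existing rbd pool is also changed per-pool in the next step):

[global]
    ...
    osd_pool_default_size = 1
    osd_pool_default_min_size = 1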
Step:
root@pve01:/mnt/test# ceph osd pool get rbd size
size: 2
root@pve01:/mnt/test# ceph config set global mon_allow_pool_size_one true
root@pve01:/mnt/test# ceph osd pool set rbd min_size 1
set pool 2 min_size to 1
root@pve01:/mnt/test# ceph osd pool set rbd size 1 --yes-i-really-mean-it
set pool 2 size to 1

Result:
root@pve01:/mnt/test# ceph -s
 cluster:
 id: 1f3eacc8-2488-4e1a-94bf-7181ee7db522
 health: HEALTH_WARN
 2 pool(s) have no replicas configured

 services:
 mon: 3 daemons, quorum pve01,pve02,pve03 (age 17m)
 mgr: pve01(active, since 17m), standbys: pve02, pve03
 osd: 6 osds: 1 up (since 19s), 1 in (since 96s)

 data:
 pools: 2 pools, 33 pgs
 objects: 281 objects, 1.0 GiB
 usage: 1.1 GiB used, 119 GiB / 120 GiB avail
 pgs: 33 active+clean

root@pve01:/mnt/test# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.70312 root default
-3 0.23438 host pve01
 0 ssd 0.11719 osd.0 up 1.00000 1.00000
 1 ssd 0.11719 osd.1 down 0 1.00000
-5 0.23438 host pve02
 2 ssd 0.11719 osd.2 down 0 1.00000
 3 ssd 0.11719 osd.3 down 0 1.00000
-7 0.23438 host pve03
 4 ssd 0.11719 osd.4 down 0 1.00000
 5 ssd 0.11719 osd.5 down 0 1.00000
root@pve01:/mnt/test# python3 ~/fio-cdm/fio-cdm
tests: 5, size: 1.0GiB, target: /mnt/test 175.8MiB/20.0GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 1153.07| 515.29|
|SEQ1M Q1 T1 | 447.35| 142.98|
|RND4K Q32T16| 99.07| 32.19|
|. IOPS | 24186.26| 7859.91|
|. latency us| 21148.94| 65076.23|
|RND4K Q1 T1 | 7.47| 1.48|
|. IOPS | 1823.24| 360.98|
|. latency us| 545.98| 2765.23|
root@pve01:/mnt/test#

Next, edit the crushmap: in the rule's step chooseleaf firstn 0 type host, change host to osd.
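The crushmap edit can be done with the standard round-trip (a sketch; file names are arbitrary):

ceph osd getcrushmap -o crush.bin
crushtool -d crush.bin -o crush.txt
# edit crush.txt: in the replicated rule, change
#   step chooseleaf firstn 0 type host
# to
#   step chooseleaf firstn 0 type osd
crushtool -c crush.txt -o crush-new.bin
ceph osd setcrushmap -i crush-new.bin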
OSD tree:
root@pve01:/etc/ceph# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.70312 root default
-3 0.23438 host pve01
 0 ssd 0.11719 osd.0 up 1.00000 1.00000
 1 ssd 0.11719 osd.1 up 1.00000 1.00000
-5 0.23438 host pve02
 2 ssd 0.11719 osd.2 down 0 1.00000
 3 ssd 0.11719 osd.3 down 0 1.00000
-7 0.23438 host pve03
 4 ssd 0.11719 osd.4 down 0 1.00000
 5 ssd 0.11719 osd.5 down 0 1.00000

Result:
root@pve01:/mnt/test# python3 ~/fio-cdm/fio-cdm
tests: 5, size: 1.0GiB, target: /mnt/test 175.8MiB/20.0GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 1376.59| 397.29|
|SEQ1M Q1 T1 | 442.74| 111.41|
|RND4K Q32T16| 114.97| 29.08|
|. IOPS | 28068.12| 7099.90|
|. latency us| 18219.04| 72038.06|
|RND4K Q1 T1 | 6.82| 1.04|
|. IOPS | 1665.27| 254.40|
|. latency us| 598.00| 3926.30|

OSD tree:
root@pve01:/etc/ceph# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.70312 root default
-3 0.23438 host pve01
 0 ssd 0.11719 osd.0 up 1.00000 1.00000
 1 ssd 0.11719 osd.1 up 1.00000 1.00000
-5 0.23438 host pve02
 2 ssd 0.11719 osd.2 up 1.00000 1.00000
 3 ssd 0.11719 osd.3 up 1.00000 1.00000
-7 0.23438 host pve03
 4 ssd 0.11719 osd.4 up 1.00000 1.00000
 5 ssd 0.11719 osd.5 up 1.00000 1.00000

Result:
tests: 5, size: 1.0GiB, target: /mnt/test 175.8MiB/20.0GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 1527.37| 296.25|
|SEQ1M Q1 T1 | 408.86| 106.43|
|RND4K Q32T16| 189.20| 43.00|
|. IOPS | 46191.94| 10499.01|
|. latency us| 11068.93| 48709.85|
|RND4K Q1 T1 | 4.99| 0.95|
|. IOPS | 1219.16| 232.37|
|. latency us| 817.51| 4299.14|

For comparison, the same fio-cdm run on my Windows machine (G:\) and directly on the physical NVMe (pve1):

G:\fio>python "E:\Programing\PycharmProjects\fio-cdm\fio-cdm"
tests: 5, size: 1.0GiB, target: G:\fio 228.2GiB/953.8GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 363.45| 453.54|
|SEQ1M Q1 T1 | 329.47| 404.09|
|RND4K Q32T16| 196.16| 212.42|
|. IOPS | 47890.44| 51861.48|
|. latency us| 10677.71| 9862.74|
|RND4K Q1 T1 | 20.66| 65.44|
|. IOPS | 5044.79| 15976.40|
|. latency us| 197.04| 61.07|

root@pve1:/mnt/test# python3 ~/fio-cdm/fio-cdm
tests: 5, size: 1.0GiB, target: /mnt/test 3.4GiB/476.7GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 2358.84| 1476.54|
|SEQ1M Q1 T1 | 1702.19| 1291.18|
|RND4K Q32T16| 955.34| 1070.17|
|. IOPS | 233238.46| 261273.09|
|. latency us| 2193.90| 1957.79|
|RND4K Q1 T1 | 55.04| 229.99|
|. IOPS | 13437.11| 56149.97|
|. latency us| 73.17| 16.65|
Test results for the hdd+ssd hybrid Ceph setup with bcache acceleration:
Read improves noticeably, but write is still very poor.
tests: 5, size: 1.0GiB, target: /mnt/test 104.3MiB/10.0GiB
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ1M Q8 T1 | 1652.93| 242.41|
|SEQ1M Q1 T1 | 552.91| 81.16|
|RND4K Q32T16| 429.52| 31.95|
|. IOPS | 104862.76| 7799.72|
|. latency us| 4879.87| 65618.50|
|RND4K Q1 T1 | 13.10| 0.45|
|. IOPS | 3198.16| 110.09|
|. latency us| 310.07| 9077.11|

Even putting multiple OSDs on a single disk does not solve the write problem.
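For context, a bcache-backed OSD of this kind is typically assembled roughly as follows before being handed to Ceph (a sketch with placeholder device names, not the exact commands from my setup):

apt install bcache-tools -y
make-bcache -B /dev/sdX                 # HDD as backing device, shows up as /dev/bcache0
make-bcache -C /dev/nvme0n1pY           # SSD/NVMe partition as the cache set
bcache-super-show /dev/nvme0n1pY | grep cset.uuid
echo <cset-uuid> > /sys/block/bcache0/bcache/attach
echo writeback > /sys/block/bcache0/bcache/cache_mode
pveceph osd create /dev/bcache0         # or: ceph-volume lvm create --data /dev/bcache0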
Detailed test data: https://www.reddit.com/r/ceph/comments/xnse2j/comment/j6qs57g/?cOntext=3
With VMware vSAN it is easy to get HDDs accelerated to SSD-like speed, to the point where you can barely tell the HDDs are there (I have not done a detailed comparison; this is just my impression).
I went through and compared several reports; a summary follows:
Proxmox-VE_Ceph-Benchmark-201802.pdf
Proxmox-VE_Ceph-Benchmark-202009-rev2.pdf
Dell_R730xd_RedHat_Ceph_Performance_SizingGuide_WhitePaper.pdf
micron_9300_and_red_hat_ceph_reference_architecture.pdf
From the report (Proxmox-VE_Ceph-Benchmark-201802.pdf): the test setup is 6 servers, each with 4 x Samsung SM863 Series, 2.5", 240 GB SSD, SATA-3 (6 Gb/s) MLC.
# Samsung SM863 Series, 2.5", 240 GB SSD
# from https://www.samsung.com/us/business/support/owners/product/sm863-series-240gb/
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ?M Q? T? | 520.00| 485.00|
|RND4K Q? T? | ?| ?|
|. IOPS | 97000.00| 20000.00|

The report's results:
# 3 Node Cluster / 4 x Samsung SM863 as OSD per Node
# rados bench 60 write -b 4M -t 16
# rados bench 60 read -t 16 (uses 4M from write)
|Name | Read(MB/s)| Write(MB/s)|
# 10 Gbit Network
|------------|------------|------------|
|SEQ4M Q? T16| 1064.42| 789.12|
# 100 Gbit Network
|------------|------------|------------|
|SEQ4M Q? T16| 3087.82| 1011.63|

The impact of network bandwidth on performance is clearly huge. The 10 Gbit numbers are lower, but at least both read and write get close to the wire speed. Looking at my own results, by contrast, WRITE is terrible (296.25 MB/s).
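To compare on the same footing, the report's rados bench workload can be replayed against my own pool like this (a sketch; the pool name is arbitrary):

ceph osd pool create testbench 128 128
rados bench -p testbench 60 write -b 4M -t 16 --no-cleanup
rados bench -p testbench 60 seq -t 16
rados -p testbench cleanup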
From the report (Proxmox-VE_Ceph-Benchmark-202009-rev2.pdf): the test setup is 3 servers; each server has 4 x Micron 9300 Max 3.2 TB (MTFDHAL3T2TDR) and 1 x 100 GbE DAC, in a full-mesh topology.
# Micron 9300 Max 3.2 TB (MTFDHAL3T2TDR)
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ128KQ32T?| 3500.00| 3100.00| (MTFDHAL12T8TDR-1AT1ZABYY-Micron-LBGA-2022.pdf)
|RND4K Q512T?| 3340.00| 840.00| (estimated via throughput ~= iops * 4k / 1000)
|. IOPS | 835000.00| 210000.00| (MTFDHAL12T8TDR-1AT1ZABYY-Micron-LBGA-2022.pdf)
|------------|------------|------------|
|RND4K Q1 T1 | | 205.82| (from the report)
|. IOPS | | 51000.00| (from the report)
|. latency ms| | 0.02| (from the report)

The report's results:
# MULTI-VM WORKLOAD (LINUX)
# I am not sure what the difference between "Thread" and "Job" is here, and the document does not state the queue depth
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ4M Q? T1 | 7176.00| 2581.00| (SEQUENTIAL BANDWIDTH BY NUMBER OF JOBS)
|RND4K Q1 T1 | 86.00| 28.99| (estimated via the formula)
|. IOPS | 21502.00| 7248.00| (RANDOM IO/S BY NUMBER OF JOBS)

Again, the RND4K Q1 T1 WRITE result is very poor: only about 7k IOPS, while the physical disk delivers 51k IOPS. A gap like that feels unacceptable to me.
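As far as I understand fio (my reading, not a definition from the report), "jobs" corresponds to numjobs (independent parallel workers) while the queue depth is iodepth per job, so a 16-worker/Q32 case is equivalent to something like:

fio --name=rnd-write-4k --directory=/mnt/test --size=1G \
    --ioengine=libaio --direct=1 --rw=randwrite --bs=4k \
    --iodepth=32 --numjobs=16 --runtime=60 --time_based --group_reporting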
From the report (Dell_R730xd_RedHat_Ceph_Performance_SizingGuide_WhitePaper.pdf): the test setup is 5 storage servers; each server has 12 HDD + 3 SSD, with 3x replication and 2 x 10GbE NICs.
# Test results excerpted from the report
# Figure 8 Throughput/server comparison by using different configurations
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|SEQ4M Q64T1 | 1150.00| 300.00|

In this setup the SEQ4M Q64T1 write result is only about 300 MB/s, roughly twice a single SAS drive, i.e. 2 x 158.16 MB/s (4M blocks). I find that hard to believe, yet it is still faster than my NVMe-backed pool (296.25 MB/s). The other striking fact is that 12 x 5 = 60 HDDs deliver only 300 MB/s of sequential write; isn't that an awfully large performance loss?
From the report (micron_9300_and_red_hat_ceph_reference_architecture.pdf): the test setup is 3 storage servers; each server has 10 x Micron 9300 MAX 12.8 TB, with 2x replication and 2 x 100GbE NICs.
# Micron 9300 MAX 12.8 TB (MTFDHAL12T8TDR-1AT1ZABYY) physical disk figures
|Name | Read(MB/s)| Write(MB/s)| (? = parameter not given)
|------------|------------|------------|
|SEQ?M Q? T? | 48360.00| ?| (excerpted from the report)
|SEQ128KQ32T?| 3500.00| 3500.00| (MTFDHAL12T8TDR-1AT1ZABYY-Micron-LBGA-2022.pdf)
|RND4K Q512T?| 3400.00| 1240.00| (estimated via the formula)
|. IOPS | 850000.00| 310000.00| (MTFDHAL12T8TDR-1AT1ZABYY-Micron-LBGA-2022.pdf)
|. latency us| 86.00| 11.00| (MTFDHAL12T8TDR-1AT1ZABYY-Micron-LBGA-2022.pdf)
|------------|------------|------------|
|RND4K Q? T? | 8397.77| 1908.11| (estimated via the formula)
|. IOPS | 2099444.00| 477029.00| (excerpted from the report, Executive Summary)
|. latency ms| 1.50| 6.70| (excerpted from the report, Executive Summary)

The report's WRITE results are as follows:
# (excerpted from the report)
|Name | Read(MB/s)| Write(MB/s)|
|------------|------------|------------|
|RND4KQ32T100| ?| ?|
|. IOPS | 2099444.00| 477029.00| (I am not sure whether the official report has an issue here; there appears to be no performance loss at all)
|. latency ms| 1.52| 6.71|

I have to say that Micron's official test platform is simply too high-end; it is not something small and medium-sized businesses like us can afford.
From these results, WRITE is close to the performance of a single physical disk. Does that mean that with only a single node and a single disk, WRITE would drop to 477k / 30 = 15.9k IOPS (30 = 3 servers x 10 drives)? If so, that is SATA SSD territory.
\n