$ sudo vi /etc/fstab
$ cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Thu Jun 12 01:18:32 2025
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/ocivolume-root / xfs defaults 0 0
UUID=dd88872e-0527-4193-8282-b8281f1ae6fd /boot xfs defaults 0 0
UUID=AE3C-806E /boot/efi vfat defaults,uid=0,gid=0,umask=077,shortname=winnt 0 2
/dev/mapper/ocivolume-oled /var/oled xfs defaults 0 0
tmpfs /dev/shm tmpfs defaults,nodev,nosuid,noexec 0 0
######################################
## ORACLE CLOUD INFRASTRUCTURE CUSTOMERS
##
## If you are adding an iSCSI remote block volume to this file you MUST
## include the '_netdev' mount option or your instance will become
## unavailable after the next reboot.
## SCSI device names are not stable across reboots; please use the device UUID instead of /dev path.
##
## Example:
## UUID="94c5aade-8bb1-4d55-ad0c-388bb8aa716a" /data1 xfs defaults,noatime,_netdev 0 2
##
## More information:
## https://docs.us-phoenix-1.oraclecloud.com/Content/Block/Tasks/connectingtoavolume.htm
/.swapfile none swap sw 0 0
/var/oled/swapfile none swap sw 0 0
$
準備2-3: パッケージを最新へアップデート
現時点での最新へアップデートする
$ sudo dnf update -y
<略>
$ sudo reboot
手順1-4: 日本語Locale対応
ja_JP.UTF-8など日本語Localeで設定した際、「Failed to set locale, defaulting to C.UTF-8」というメッセージが出力される場合があります。
[opc@ocimail ~]$ sudo dehydrated --register
# INFO: Using main config file /etc/dehydrated/config
# INFO: Using additional config file /etc/dehydrated/conf.d/local.sh
To use dehydrated with this certificate authority you have to agree to their terms of service which you can find here: https://letsencrypt.org/documents/LE-SA-v1.5-February-24-2025.pdf
To accept these terms of service run "/bin/dehydrated --register --accept-terms".
[opc@ocimail ~]$ sudo dehydrated --register --accept-terms
# INFO: Using main config file /etc/dehydrated/config
# INFO: Using additional config file /etc/dehydrated/conf.d/local.sh
+ Generating account key...
+ Registering account key with ACME server...
+ Fetching account URL...
+ Done!
[opc@ocimail ~]$
初回のSSL証明書発行処理を実行します。
[opc@ocimail ~]$ sudo dehydrated --cron
# INFO: Using main config file /etc/dehydrated/config
# INFO: Using additional config file /etc/dehydrated/conf.d/local.sh
Processing ocimail.websa.jp
+ Signing domains...
+ Generating private key...
+ Generating signing request...
+ Requesting new certificate order from CA...
+ Received 1 authorizations URLs from the CA
+ Handling authorization for ホスト名
+ 1 pending challenge(s)
+ Deploying challenge tokens...
+ Responding to challenge for ocimail.websa.jp authorization...
+ Challenge is valid!
+ Cleaning challenge tokens...
+ Requesting certificate...
Warning: Will read cert request from stdin since no -in option is given
+ Checking certificate...
+ Done!
+ Creating fullchain.pem...
+ Done!
+ Running automatic cleanup
Moving unused file to archive directory: ホスト名/cert-1761722366.csr
Moving unused file to archive directory: ホスト名/cert-1761722366.pem
Moving unused file to archive directory: ホスト名/privkey-1761722366.pem
[opc@ocimail ~]$
これで/etc/dehydrated/certs/ホスト名/ にSSL証明書が作成される
[opc@ocimail ~]$ sudo ls -l /etc/dehydrated/certs/ホスト名/
total 20
-rw-------. 1 root root 1655 Oct 29 07:26 cert-1761722787.csr
-rw-------. 1 root root 2134 Oct 29 07:26 cert-1761722787.pem
lrwxrwxrwx. 1 root root 19 Oct 29 07:26 cert.csr -> cert-1761722787.csr
lrwxrwxrwx. 1 root root 19 Oct 29 07:26 cert.pem -> cert-1761722787.pem
-rw-------. 1 root root 1802 Oct 29 07:26 chain-1761722787.pem
lrwxrwxrwx. 1 root root 20 Oct 29 07:26 chain.pem -> chain-1761722787.pem
-rw-------. 1 root root 3936 Oct 29 07:26 fullchain-1761722787.pem
lrwxrwxrwx. 1 root root 24 Oct 29 07:26 fullchain.pem -> fullchain-1761722787.pem
-rw-------. 1 root root 3272 Oct 29 07:26 privkey-1761722787.pem
lrwxrwxrwx. 1 root root 22 Oct 29 07:26 privkey.pem -> privkey-1761722787.pem
[opc@ocimail ~]$
dovecot設定
dovecot ssl設定
/etc/dovecot/conf.d/10-ssl.conf にて
「ssl = required」を「ssl = yes」に変更
# SSL/TLS support: yes, no, required. <doc/wiki/SSL.txt>
# disable plain pop3 and imap, allowed are only pop3+TLS, pop3s, imap+TLS and imaps
# plain imap and pop3 are still allowed for local connections
ssl = yes
ssl_certとssl_keyをdehydratedが出力したファイルに置き換え
# PEM encoded X.509 SSL/TLS certificate and private key. They're opened before
# dropping root privileges, so keep the key file unreadable by anyone but
# root. Included doc/mkcert.sh can be used to easily generate self-signed
# certificate, just make sure to update the domains in dovecot-openssl.cnf
ssl_cert = </etc/dehydrated/certs/ホスト名/fullchain.pem
ssl_key = </etc/dehydrated/certs/ホスト名/privkey.pem
dhパラメータのコメントを取る
# SSL DH parameters
# Generate new params with `openssl dhparam -out /etc/dovecot/dh.pem 4096`
# Or migrate from old ssl-parameters.dat file with the command dovecot
# gives on startup when ssl_dh is unset.
ssl_dh = </etc/dovecot/dh.pem
# Space separated list of plugins to load for all services. Plugins specific to
# IMAP, LDA, etc. are added to this list in their own .conf files.
mail_plugins = $mail_plugins quota
##
## Mailbox handling optimizations
##
# Mailbox list indexes can be used to optimize IMAP STATUS commands. They are
# also required for IMAP NOTIFY extension to be enabled.
mailbox_list_index = yes
# Disable LOGIN command and all other plaintext authentications unless
# SSL/TLS is used (LOGINDISABLED capability). Note that if the remote IP
# matches the local IP (ie. you're connecting from the same computer), the
# connection is considered secure and plaintext authentication is allowed.
# See also ssl=required setting.
#disable_plaintext_auth = yes
また、下記を「auth_mechanisms = plain login」に変更する
# Space separated list of wanted authentication mechanisms:
# plain login digest-md5 cram-md5 ntlm rpa apop anonymous gssapi otp
# gss-spnego
# NOTE: See also disable_plaintext_auth setting.
auth_mechanisms = plain
protocol imap {
# Space separated list of plugins to load (default is global mail_plugins).
mail_plugins = $mail_plugins imap_quota
# Maximum number of IMAP connections allowed for a user from each IP address.
# NOTE: The username is compared case-sensitively.
#mail_max_userip_connections = 10
}
##
## Mailbox definitions
##
# Each mailbox is specified in a separate mailbox section. The section name
# specifies the mailbox name. If it has spaces, you can put the name
# "in quotes". These sections can contain the following mailbox settings:
#
# auto:
# Indicates whether the mailbox with this name is automatically created
# implicitly when it is first accessed. The user can also be automatically
# subscribed to the mailbox after creation. The following values are
# defined for this setting:
#
# no - Never created automatically.
# create - Automatically created, but no automatic subscription.
# subscribe - Automatically created and subscribed.
#
# special_use:
# A space-separated list of SPECIAL-USE flags (RFC 6154) to use for the
# mailbox. There are no validity checks, so you could specify anything
# you want in here, but it's not a good idea to use flags other than the
# standard ones specified in the RFC:
#
# \All - This (virtual) mailbox presents all messages in the
# user's message store.
# \Archive - This mailbox is used to archive messages.
# \Drafts - This mailbox is used to hold draft messages.
# \Flagged - This (virtual) mailbox presents all messages in the
# user's message store marked with the IMAP \Flagged flag.
# \Important - This (virtual) mailbox presents all messages in the
# user's message store deemed important to user.
# \Junk - This mailbox is where messages deemed to be junk mail
# are held.
# \Sent - This mailbox is used to hold copies of messages that
# have been sent.
# \Trash - This mailbox is used to hold messages that have been
# deleted.
#
# comment:
# Defines a default comment or note associated with the mailbox. This
# value is accessible through the IMAP METADATA mailbox entries
# "/shared/comment" and "/private/comment". Users with sufficient
# privileges can override the default value for entries with a custom
# value.
# NOTE: Assumes "namespace inbox" has been defined in 10-mail.conf.
namespace inbox {
# These mailboxes are widely used and could perhaps be created automatically:
mailbox Drafts {
special_use = \Drafts
}
mailbox "下書き" {
special_use = \Drafts
}
mailbox INBOX.Drafts {
special_use = \Drafts
}
mailbox Junk {
special_use = \Junk
}
mailbox "迷惑メール" {
special_use = \Junk
}
mailbox "Junk Email" {
special_use = \Junk
}
mailbox "INBOX.Junk Email" {
special_use = \Junk
}
mailbox Trash {
special_use = \Trash
}
mailbox "削除済みアイテム" {
special_use = \Trash
}
mailbox "Deleted Items" {
special_use = \Trash
}
mailbox "INBOX.Deleted Items" {
special_use = \Trash
}
# For \Sent mailboxes there are two widely used names. We'll mark both of
# them as \Sent. User typically deletes one of them if duplicates are created.
mailbox Sent {
special_use = \Sent
}
mailbox "Sent Messages" {
special_use = \Sent
}
mailbox "送信済みアイテム" {
special_use = \Sent
}
mailbox "Sent Items" {
special_use = \Sent
}
mailbox "INBOX.Sent Items" {
special_use = \Sent
}
# If you have a virtual "All messages" mailbox:
#mailbox virtual/All {
# special_use = \All
# comment = All my messages
#}
# If you have a virtual "Flagged" mailbox:
#mailbox virtual/Flagged {
# special_use = \Flagged
# comment = All my flagged messages
#}
# If you have a virtual "Important" mailbox:
#mailbox virtual/Important {
# special_use = \Important
# comment = All my important messages
#}
}
dovecotのquota設定
既存の /etc/dovecot/conf.d/90-quota.conf の最後に下記を追加する
これは100MBで制限する例
mailbox_list_index = yes
# Avoid spending excessive time waiting for the quota calculation to finish
# when mails' vsizes aren't already cached. If this many mails are opened,
# finish the quota calculation on background in indexer-worker process. Mail
# deliveries will be assumed to succeed, and explicit quota lookups will
# return internal error. (v2.2.28+)
protocol !indexer-worker {
mail_vsize_bg_after_count = 100
}
plugin {
quota = count:User quota
quota_rule = *:storage=100M
#quota_rule2 = Trash:storage=+100M
# This is required - it uses "virtual sizes" rather than "physical sizes"
# for quota counting:
quota_vsizes = yes
}
# Relay server configuration:
#
# The Dovecot SMTP submission service directly proxies the mail transaction
# to the SMTP relay configured here.
# Host name for the relay server (required)
submission_relay_host = ホスト名.ドメイン名
# INTERNET HOST AND DOMAIN NAMES
#
# The myhostname parameter specifies the internet hostname of this
# mail system. The default is to use the fully-qualified domain name
# from gethostname(). $myhostname is used as a default value for many
# other configuration parameters.
#
#myhostname = host.domain.tld
#myhostname = virtual.domain.tld
myhostname = ホスト名.ドメイン名
続いてmydomain も 外部で認識させたいドメイン名を入れる
# The mydomain parameter specifies the local internet domain name.
# The default is to use $myhostname minus the first component.
# $mydomain is used as a default value for many other configuration
# parameters.
#
#mydomain = domain.tld
mydomain = ドメイン名
# SENDING MAIL
#
# The myorigin parameter specifies the domain that locally-posted
# mail appears to come from. The default is to append $myhostname,
# which is fine for small sites. If you run a domain with multiple
# machines, you should (1) change this to $mydomain and (2) set up
# a domain-wide alias database that aliases each user to
# user@that.users.mailhost.
#
# For the sake of consistency between sender and recipient addresses,
# myorigin also specifies the default domain name that is appended
# to recipient addresses that have no @domain part.
#
#myorigin = $myhostname
myorigin = $mydomain
# The inet_interfaces parameter specifies the network interface
# addresses that this mail system receives mail on. By default,
# the software claims all active interfaces on the machine. The
# parameter also controls delivery of mail to user@[ip.address].
#
# See also the proxy_interfaces parameter, for network addresses that
# are forwarded to us via a proxy or network address translator.
#
# Note: you need to stop/start Postfix when this parameter changes.
#
inet_interfaces = all
#inet_interfaces = $myhostname
#inet_interfaces = $myhostname, localhost
#inet_interfaces = localhost
# Enable IPv4, and IPv6 if supported
inet_protocols = all
自サーバで受信するメールドメインの設定は、2番目の例（$mydomain 宛ても受け取る設定）にします
# See also below, section "REJECTING MAIL FOR UNKNOWN LOCAL USERS".
#
#mydestination = $myhostname, localhost.$mydomain, localhost
mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain
#mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain,
# mail.$mydomain, www.$mydomain, ftp.$mydomain
# The right-hand side of the lookup tables is conveniently ignored.
# In the left-hand side, specify a bare username, an @domain.tld
# wild-card, or specify a user@domain.tld address.
#
local_recipient_maps = unix:passwd.byname $alias_maps
#local_recipient_maps = proxy:unix:passwd.byname $alias_maps
#local_recipient_maps =
# With Postfix version before 2.10, use smtpd_recipient_restrictions
smtpd_relay_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_unauth_destination
postfixのssl証明書設定
/etc/postfix/main.cf の最後の方にSSL関連の設定がある
# The full pathname of a file with the Postfix SMTP server RSA certificate
# in PEM format. Intermediate certificates should be included in general,
# the server certificate first, then the issuing CA(s) (bottom-up order).
#
smtpd_tls_cert_file = /etc/dehydrated/certs/ocimail.websa.jp/fullchain.pem
# The full pathname of a file with the Postfix SMTP server RSA private key
# in PEM format. The private key must be accessible without a pass-phrase,
# i.e. it must not be encrypted.
#
smtpd_tls_key_file = /etc/dehydrated/certs/ocimail.websa.jp/privkey.pem
postfix側からdovecotを起動する設定
/etc/postfix/master.cf の最終行に以下を追加
dovecot unix - n n - - pipe
flags=DRhu user=vmail:vmail argv=/usr/libexec/dovecot/dovecot-lda -f ${sender} -d ${recipient}
smtpd_sasl_auth_enable = yes
broken_sasl_auth_clients = yes
# dovecot sasl configuration
smtpd_sasl_type = dovecot
# Can be an absolute path, or relative to $queue_directory
# Debian/Ubuntu users: Postfix is setup by default to run chrooted, so it is best to leave it as-is below
smtpd_sasl_path = private/auth
# and the common settings to enable SASL:
smtpd_sasl_auth_enable = yes
smtpd_sasl_security_options = noanonymous
トラブル対応
gmail宛てがgmail側で受け取り拒否
gmailに送ったところ、メールがなかなか届かないので/var/log/maillogを確認
Oct 30 10:15:28 ocimail postfix/smtp[5898]: AA57A309E87E: to=<アドレス@gmail.com>, relay=gmail-smtp-in.l.google.com[2404:6800:4008:c15::1b]:25, delay=2.6, delays=0.29/0.19/1.2/0.9, dsn=5.7.1, status=bounced (host gmail-
smtp-in.l.google.com[2404:6800:4008:c15::1b] said: 550-5.7.1 [2603:c023:f:9000:0:7a41:2e84:f27f] Gmail has detected that this 550-5.7.1 message does not meet IPv6 sending guidelines regarding PTR records 550-5.7.1 and auth
entication. For more information, go to 550 5.7.1 https://support.google.com/mail/?p=IPv6AuthError 41be03b00d2f7-b71f4ba78c7si7488417a12.1056 - gsmtp (in reply to end of DATA command))
「Gmail has detected that this 550-5.7.1 message does not meet IPv6 sending guidelines regarding PTR records 550-5.7.1 and authentication. For more information, go to 550 5.7.1 https://support.google.com/mail/?p=IPv6AuthError」という形で拒否されていた。
IPv6 認証エラーの例を次に示します。 550-5.7.1: Message does not meet IPv6 sending guidelines regarding PTR records and authentication.(メールが PTR レコードと認証に関する IPv6 の送信ガイドラインに準拠していません。)
あと、最初の方に書かれているIPアドレスに対する要件も関係している
インフラストラクチャ設定の要件とガイドライン IP アドレス 重要: 送信元 IP アドレスは、ポインタ(PTR)レコードで指定されたホスト名の IP アドレスと一致している必要があります。
送信元 SMTP サーバーのパブリック IP アドレスには、対応するホスト名を参照する PTR レコードが必要です。これは、リバース DNS ルックアップと呼ばれます。このホスト名には、送信元サーバーと同じパブリック IP アドレスを参照する A レコード(IPv4 の場合)または AAAA レコード(IPv6 の場合)も必要です。これは、フォワード DNS ルックアップと呼ばれます。 送信元サーバーの IP アドレスとドメインを対応付ける有効なリバース DNS レコードを設定します。Google 管理者ツールボックスの Dig ツールを使用して PTR レコードを確認します。 重要: 送信元 IP アドレスは、ポインタ(PTR)レコードで指定されたホスト名の IP アドレスと一致している必要があります。
[root@esxi:~] esxcli storage core device list
t10.ATA_____W800S_256GB_____________________________2202211088199_______
Display Name: Local ATA Disk (t10.ATA_____W800S_256GB_____________________________2202211088199_______)
Has Settable Display Name: true
Size: 244198
Device Type: Direct-Access
Multipath Plugin: HPP
Devfs Path: /vmfs/devices/disks/t10.ATA_____W800S_256GB_____________________________2202211088199_______
Vendor: ATA
Model: W800S 256GB
Revision: 3G5A
SCSI Level: 5
Is Pseudo: false
Status: on
Is RDM Capable: false
Is Local: true
Is Removable: false
Is SSD: true
Is VVOL PE: false
Is Offline: false
Is Perennially Reserved: false
Queue Full Sample Size: 0
Queue Full Threshold: 0
Thin Provisioning Status: yes
Attached Filters:
VAAI Status: unsupported
Other UIDs: vml.01000000003232303232313130383831393920202020202020573830305320
Is Shared Clusterwide: false
Is SAS: false
Is USB: false
Is Boot Device: true
Device Max Queue Depth: 31
No of outstanding IOs with competing worlds: 31
Drive Type: unknown
RAID Level: unknown
Number of Physical Drives: unknown
Protection Enabled: false
PI Activated: false
PI Type: 0
PI Protection Mask: NO PROTECTION
Supported Guard Types: NO GUARD SUPPORT
DIX Enabled: false
DIX Guard Type: NO GUARD SUPPORT
Emulated DIX/DIF Enabled: false
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Display Name: Local NVMe Disk (t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000)
Has Settable Display Name: true
Size: 1953514
Device Type: Direct-Access
Multipath Plugin: HPP
Devfs Path: /vmfs/devices/disks/t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Vendor: NVMe
Model: SPD SP700-2TNGH
Revision: SP02203A
SCSI Level: 0
Is Pseudo: false
Status: on
Is RDM Capable: false
Is Local: true
Is Removable: false
Is SSD: true
Is VVOL PE: false
Is Offline: false
Is Perennially Reserved: false
Queue Full Sample Size: 0
Queue Full Threshold: 0
Thin Provisioning Status: no
Attached Filters:
VAAI Status: unsupported
Other UIDs: vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b89f9e2
Is Shared Clusterwide: false
Is SAS: false
Is USB: false
Is Boot Device: false
Device Max Queue Depth: 1023
No of outstanding IOs with competing worlds: 32
Drive Type: physical
RAID Level: NA
Number of Physical Drives: 1
Protection Enabled: false
PI Activated: false
PI Type: 0
PI Protection Mask: NO PROTECTION
Supported Guard Types: NO GUARD SUPPORT
DIX Enabled: false
DIX Guard Type: NO GUARD SUPPORT
Emulated DIX/DIF Enabled: false
[root@esxi:~]
パーティションは1番の方なので下記を実施
[root@esxi:~] voma -m vmfs -f check -N -d /vmfs/devices/disks/vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b89f9e2:1
Running VMFS Checker version 2.1 in check mode
Initializing LVM metadata, Basic Checks will be done
Checking for filesystem activity
Performing filesystem liveness check..|Scanning for VMFS-6 host activity (4096 bytes/HB, 1024 HBs).
Reservation Support is not present for NVME devices
Performing filesystem liveness check..|
########################################################################
# Warning !!! #
# #
# You are about to execute VOMA without device reservation. #
# Any access to this device from other hosts when VOMA is running #
# can cause severe data corruption #
# #
# This mode is supported only under VMware support supervision. #
########################################################################
Do you want to continue (Y/N)?
0) _Yes
1) _No
Select a number from 0-1: 0
Phase 1: Checking VMFS header and resource files
Detected VMFS-6 file system (labeled:'nvme2tb') with UUID:68e4cab1-0a865c28-49c0-04ab182311d3, Version 6:82
Phase 2: Checking VMFS heartbeat region
Phase 3: Checking all file descriptors.
Phase 4: Checking pathname and connectivity.
Phase 5: Checking resource reference counts.
Total Errors Found: 0
[root@esxi:~]
[root@esxi:~] esxcli storage core path list
sata.vmhba0-sata.0:1-t10.ATA_____W800S_256GB_____________________________2202211088199_______
UID: sata.vmhba0-sata.0:1-t10.ATA_____W800S_256GB_____________________________2202211088199_______
Runtime Name: vmhba0:C0:T1:L0
Device: t10.ATA_____W800S_256GB_____________________________2202211088199_______
Device Display Name: Local ATA Disk (t10.ATA_____W800S_256GB_____________________________2202211088199_______)
Adapter: vmhba0
Controller: Not Applicable
Channel: 0
Target: 1
LUN: 0
Plugin: HPP
State: active
Transport: sata
Adapter Identifier: sata.vmhba0
Target Identifier: sata.0:1
Adapter Transport Details: Unavailable or path is unclaimed
Target Transport Details: Unavailable or path is unclaimed
Maximum IO Size: 33554432
pcie.300-pcie.0:0-t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
UID: pcie.300-pcie.0:0-t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Runtime Name: vmhba1:C0:T0:L0
Device: t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Device Display Name: Local NVMe Disk (t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000)
Adapter: vmhba1
Controller: nqn.2014-08.org.nvmexpress_1e4b_SPD_SP700-2TNGH_________________________0901SP7007D00399
Channel: 0
Target: 0
LUN: 0
Plugin: HPP
State: active
Transport: pcie
Adapter Identifier: pcie.300
Target Identifier: pcie.0:0
Adapter Transport Details: Unavailable or path is unclaimed
Target Transport Details: Unavailable or path is unclaimed
Maximum IO Size: 524288
[root@esxi:~]
[root@esxi:~] esxcfg-mpath -b
t10.ATA_____W800S_256GB_____________________________2202211088199_______ : Local ATA Disk (t10.ATA_____W800S_256GB_____________________________2202211088199_______)
vmhba0:C0:T1:L0 LUN:0 state:active Local HBA vmhba0 channel 0 target 1
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000 : Local NVMe Disk (t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000)
vmhba1:C0:T0:L0 LUN:0 state:active Local HBA vmhba1 channel 0 target 0
[root@esxi:~]
[root@esxi:~] esxcli storage core device list
t10.ATA_____W800S_256GB_____________________________2202211088199_______
Display Name: Local ATA Disk (t10.ATA_____W800S_256GB_____________________________2202211088199_______)
Has Settable Display Name: true
Size: 244198
Device Type: Direct-Access
Multipath Plugin: HPP
Devfs Path: /vmfs/devices/disks/t10.ATA_____W800S_256GB_____________________________2202211088199_______
Vendor: ATA
Model: W800S 256GB
Revision: 3G5A
SCSI Level: 5
Is Pseudo: false
Status: on
Is RDM Capable: false
Is Local: true
Is Removable: false
Is SSD: true
Is VVOL PE: false
Is Offline: false
Is Perennially Reserved: false
Queue Full Sample Size: 0
Queue Full Threshold: 0
Thin Provisioning Status: yes
Attached Filters:
VAAI Status: unsupported
Other UIDs: vml.01000000003232303232313130383831393920202020202020573830305320
Is Shared Clusterwide: false
Is SAS: false
Is USB: false
Is Boot Device: true
Device Max Queue Depth: 31
No of outstanding IOs with competing worlds: 31
Drive Type: unknown
RAID Level: unknown
Number of Physical Drives: unknown
Protection Enabled: false
PI Activated: false
PI Type: 0
PI Protection Mask: NO PROTECTION
Supported Guard Types: NO GUARD SUPPORT
DIX Enabled: false
DIX Guard Type: NO GUARD SUPPORT
Emulated DIX/DIF Enabled: false
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Display Name: Local NVMe Disk (t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000)
Has Settable Display Name: true
Size: 1953514
Device Type: Direct-Access
Multipath Plugin: HPP
Devfs Path: /vmfs/devices/disks/t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
Vendor: NVMe
Model: SPD SP700-2TNGH
Revision: SP02203A
SCSI Level: 0
Is Pseudo: false
Status: on
Is RDM Capable: false
Is Local: true
Is Removable: false
Is SSD: true
Is VVOL PE: false
Is Offline: false
Is Perennially Reserved: false
Queue Full Sample Size: 0
Queue Full Threshold: 0
Thin Provisioning Status: no
Attached Filters:
VAAI Status: unsupported
Other UIDs: vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b89f9e2
Is Shared Clusterwide: false
Is SAS: false
Is USB: false
Is Boot Device: false
Device Max Queue Depth: 1023
No of outstanding IOs with competing worlds: 32
Drive Type: physical
RAID Level: NA
Number of Physical Drives: 1
Protection Enabled: false
PI Activated: false
PI Type: 0
PI Protection Mask: NO PROTECTION
Supported Guard Types: NO GUARD SUPPORT
DIX Enabled: false
DIX Guard Type: NO GUARD SUPPORT
Emulated DIX/DIF Enabled: false
[root@esxi:~]
[root@esxi:~] esxcfg-scsidevs -c
Device UID Device Type Console Device Size Multipath PluginDisplay Name
t10.ATA_____W800S_256GB_____________________________2202211088199_______ Direct-Access /vmfs/devices/disks/t10.ATA_____W800S_256GB_____________________________2202211088199_______ 244198MB HPP Local ATA Disk (t10.ATA_____W800S_256GB_____________________________2202211088199_______)
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000 Direct-Access /vmfs/devices/disks/t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000 1953514MB HPP Local NVMe Disk (t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000)
[root@esxi:~]
vmfsに関する出力となると、2TBデバイスが登場しない
[root@esxi:~] esxcli storage vmfs extent list
Volume Name VMFS UUID Extent Number Device Name Partition
------------------------------------------ ----------------------------------- ------------- ------------------------------------------------------------------------ ---------
datastore1 68cad69a-e82d8e40-5b65-5bb7fb6107f2 0 t10.ATA_____W800S_256GB_____________________________2202211088199_______ 8
OSDATA-68cad69a-d23fb18e-73e5-5bb7fb6107f2 68cad69a-d23fb18e-73e5-5bb7fb6107f2 0 t10.ATA_____W800S_256GB_____________________________2202211088199_______ 7
[root@esxi:~]
[root@esxi:~] esxcli system module parameters list --module=nvme_pcie
Name Type Value Description
--------------------------- ---- ----- -----------
nvmePCIEBlkSizeAwarePollAct int NVMe PCIe block size aware poll activate. Valid if poll activated. Default activated.
nvmePCIEDebugMask int NVMe PCIe driver debug mask
nvmePCIEDma4KSwitch int NVMe PCIe 4k-alignment DMA
nvmePCIEFakeAdminQSize uint NVMe PCIe fake ADMIN queue size. 0's based
nvmePCIELogLevel int NVMe PCIe driver log level
nvmePCIEMsiEnbaled int NVMe PCIe MSI interrupt enable
nvmePCIEPollAct int NVMe PCIe hybrid poll activate, MSIX interrupt must be enabled. Default activated.
nvmePCIEPollInterval uint NVMe PCIe hybrid poll interval between each poll in microseconds. Valid if poll activated. Default 50us.
nvmePCIEPollOIOThr uint NVMe PCIe hybrid poll OIO threshold of automatic switch from interrupt to poll. Valid if poll activated. Default 30 OIO commands per IO queue.
[root@esxi:~] esxcli system module parameters list --module=vmknvme
Name Type Value Description
------------------------------------- ---- ----- -----------
vmknvme_adapter_num_cmpl_queues uint Number of PSA completion queues for NVMe-oF adapter, min: 1, max: 16, default: 4
vmknvme_bind_intr uint If enabled, the interrupt cookies are binded to completion worlds. This parameter is only applied when using driver completion worlds.
vmknvme_compl_world_type uint completion world type, PSA: 0, VMKNVME: 1
vmknvme_ctlr_recover_initial_attempts uint Number of initial controller recover attempts, MIN: 2, MAX: 30
vmknvme_ctlr_recover_method uint controller recover method after initial recover attempts, RETRY: 0, DELETE: 1
vmknvme_cw_rate uint Number of completion worlds per IO queue (NVMe/PCIe only). Number is a power of 2. Applies when number of queues less than 4.
vmknvme_enable_noiob uint If enabled, driver will split the commands based on NOIOB.
vmknvme_hostnqn_format uint HostNQN format, UUID: 0, HostName: 1
vmknvme_io_queue_num uint vmknvme IO queue number for NVMe/PCIe adapter: pow of 2 in [1, 16]
vmknvme_io_queue_size uint IO queue size: [8, 1024]
vmknvme_iosplit_workaround uint If enabled, qdepth in PSA layer is half size of vmknvme settings.
vmknvme_log_level uint log level: [0, 20]
vmknvme_max_prp_entries_num uint User defined maximum number of PRP entries per controller:default value is 0
vmknvme_stats uint Nvme statistics per controller (NVMe/PCIe only now). Logical OR of flags for collecting. 0x0 for disable, 0x1 for basic data (IO pattern), 0x2 for histogram without IO block size, 0x4 for histogram with IO block size. Default 0x2.
vmknvme_total_io_queue_size uint Aggregated IO queue size of a controller, MIN: 64, MAX: 4096
vmknvme_use_default_domain_name uint If set to 1, the default domain name "com.vmware", not the system domain name will always be used to generate host NQN. Not used: 0, used: 1, default: 0
[root@esxi:~] esxcli system module parameters list --module=vmknvme_vmkapi_compat
[root@esxi:~]
[root@esxi:~] esxcli nvme device list
HBA Name Status Signature
-------- ------ ---------
vmhba1 Online nvmeMgmt-nvmhba0
[root@esxi:~] esxcli nvme device get -A vmhba1
Controller Identify Info:
PCIVID: 0x1e4b
PCISSVID: 0x1e4b
Serial Number: 0901SP7007D00399
Model Number: SPD SP700-2TNGH
Firmware Revision: SP02203A
Recommended Arbitration Burst: 0
IEEE OUI Identifier: 000000
Controller Associated with an SR-IOV Virtual Function: false
Controller Associated with a PCI Function: true
NVM Subsystem May Contain Two or More Controllers: false
NVM Subsystem Contains Only One Controller: true
NVM Subsystem May Contain Two or More PCIe Ports: false
NVM Subsystem Contains Only One PCIe Port: true
Max Data Transfer Size: 7
Controller ID: 0
Version: 1.4
RTD3 Resume Latency: 500000 us
RTD3 Entry Latency: 2000000 us
Optional Firmware Activation Event Support: true
Optional Namespace Attribute Changed Event Support: false
Host Identifier Support: false
Namespace Management and Attachment Support: false
Firmware Activate and Download Support: true
Format NVM Support: true
Security Send and Receive Support: true
Abort Command Limit: 2
Async Event Request Limit: 3
Firmware Activate Without Reset Support: true
Firmware Slot Number: 3
The First Slot Is Read-only: false
Telemetry Log Page Support: false
Command Effects Log Page Support: true
SMART/Health Information Log Page per Namespace Support: false
Error Log Page Entries: 63
Number of Power States Support: 4
Format of Admin Vendor Specific Commands Is Same: true
Format of Admin Vendor Specific Commands Is Vendor Specific: false
Autonomous Power State Transitions Support: true
Warning Composite Temperature Threshold: 363
Critical Composite Temperature Threshold: 368
Max Time for Firmware Activation: 200 * 100ms
Host Memory Buffer Preferred Size: 8192 * 4KB
Host Memory Buffer Min Size: 8192 * 4KB
Total NVM Capacity: 0x1dceea56000
Unallocated NVM Capacity: 0x0
Access Size: 0 * 512B
Total Size: 0 * 128KB
Authentication Method: 0
Number of RPMB Units: 0
Keep Alive Support: 0
Max Submission Queue Entry Size: 64 Bytes
Required Submission Queue Entry Size: 64 Bytes
Max Completion Queue Entry Size: 16 Bytes
Required Completion Queue Entry Size: 16 Bytes
Max Outstanding Commands: 0
Number of Namespaces: 1
Reservation Support: false
Save/Select Field in Set/Get Feature Support: true
Write Zeroes Command Support: true
Dataset Management Command Support: true
Write Uncorrectable Command Support: true
Compare Command Support: true
Fused Operation Support: false
Cryptographic Erase as Part of Secure Erase Support: false
Cryptographic Erase and User Data Erase to All Namespaces: false
Cryptographic Erase and User Data Erase to One Particular Namespace: true
Format Operation to All Namespaces: false
Format Opertaion to One Particular Namespace: true
Volatile Write Cache Is Present: true
Atomic Write Unit Normal: 0 Logical Blocks
Atomic Write Unit Power Fail: 0 Logical Blocks
Format of All NVM Vendor Specific Commands Is Same: false
Format of All NVM Vendor Specific Commands Is Vendor Specific: true
Atomic Compare and Write Unit: 0
SGL Address Specify Offset Support: false
MPTR Contain SGL Descriptor Support: false
SGL Length Able to Larger than Data Amount: false
SGL Length Shall Be Equal to Data Amount: true
Byte Aligned Contiguous Physical Buffer of Metadata Support: false
SGL Bit Bucket Descriptor Support: false
SGL Keyed SGL Data Block Descriptor Support: false
SGL for NVM Command Set Support: false
NVM Subsystem NVMe Qualified Name:
NVM Subsystem NVMe Qualified Name (hex format):
[root@esxi:~]
[root@esxi:~] esxcli storage vmfs lockmode list
Volume Name UUID Type Locking Mode ATS Compatible ATS Upgrade Modes ATS Incompatibility Reason
------------------------------------------ ----------------------------------- -------- ------------ -------------- ----------------- --------------------------
datastore1 68cad69a-e82d8e40-5b65-5bb7fb6107f2 VMFS-6 ATS+SCSI false None Device does not support ATS
OSDATA-68cad69a-d23fb18e-73e5-5bb7fb6107f2 68cad69a-d23fb18e-73e5-5bb7fb6107f2 Non-VMFS ATS+SCSI false None Device does not support ATS
[root@esxi:~]
vomaコマンドでファイルシステムチェック
[root@esxi:~] ls /vmfs/devices/disks/
t10.ATA_____W800S_256GB_____________________________2202211088199_______
t10.ATA_____W800S_256GB_____________________________2202211088199_______:1
t10.ATA_____W800S_256GB_____________________________2202211088199_______:5
t10.ATA_____W800S_256GB_____________________________2202211088199_______:6
t10.ATA_____W800S_256GB_____________________________2202211088199_______:7
t10.ATA_____W800S_256GB_____________________________2202211088199_______:8
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000
t10.NVMe____SPD_SP7002D2TNGH_________________________0200000000000000:1
vml.0100000000303230305f303030305f303030305f3030303000535044205350
vml.0100000000303230305f303030305f303030305f3030303000535044205350:1
vml.01000000003232303232313130383831393920202020202020573830305320
vml.01000000003232303232313130383831393920202020202020573830305320:1
vml.01000000003232303232313130383831393920202020202020573830305320:5
vml.01000000003232303232313130383831393920202020202020573830305320:6
vml.01000000003232303232313130383831393920202020202020573830305320:7
vml.01000000003232303232313130383831393920202020202020573830305320:8
vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b89f9e2
vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b89f9e2:1
[root@esxi:~] voma -m vmfs -f check -N -d /vmfs/devices/disks/vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b8
9f9e2:1
Running VMFS Checker version 2.1 in check mode
Initializing LVM metadata, Basic Checks will be done
Checking for filesystem activity
Performing filesystem liveness check..|Scanning for VMFS-6 host activity (4096 bytes/HB, 1024 HBs).
Reservation Support is not present for NVME devices
Performing filesystem liveness check..|
########################################################################
# Warning !!! #
# #
# You are about to execute VOMA without device reservation. #
# Any access to this device from other hosts when VOMA is running #
# can cause severe data corruption #
# #
# This mode is supported only under VMware support supervision. #
########################################################################
Do you want to continue (Y/N)?
0) _Yes
1) _No
Select a number from 0-1: 0
Phase 1: Checking VMFS header and resource files
Detected VMFS-6 file system (labeled:'nvme2tb') with UUID:68e4cab1-0a865c28-49c0-04ab182311d3, Version 6:82
Phase 2: Checking VMFS heartbeat region
Phase 3: Checking all file descriptors.
Phase 4: Checking pathname and connectivity.
Phase 5: Checking resource reference counts.
Total Errors Found: 0
[root@esxi:~]
[root@esxi:~] voma -m vmfs -f check -N -d /vmfs/devices/disks/vml.05c56298c6cae09f64ef49957d1d7af93c98b2a5792c87d191b47f87ea5b8
9f9e2:1
Running VMFS Checker version 2.1 in check mode
Initializing LVM metadata, Basic Checks will be done
Checking for filesystem activity
Performing filesystem liveness check..|Scanning for VMFS-6 host activity (4096 bytes/HB, 1024 HBs).
Reservation Support is not present for NVME devices
Performing filesystem liveness check..|
########################################################################
# Warning !!! #
# #
# You are about to execute VOMA without device reservation. #
# Any access to this device from other hosts when VOMA is running #
# can cause severe data corruption #
# #
# This mode is supported only under VMware support supervision. #
########################################################################
Do you want to continue (Y/N)?
0) _Yes
1) _No
Select a number from 0-1: 0
Phase 1: Checking VMFS header and resource files
Detected VMFS-6 file system (labeled:'snap-444b0642-nvme2tb') with UUID:68e5b682-56352c06-7c60-04ab182311d3, Version 6:82
Phase 2: Checking VMFS heartbeat region
Phase 3: Checking all file descriptors.
Phase 4: Checking pathname and connectivity.
Phase 5: Checking resource reference counts.
Total Errors Found: 0
[root@esxi:~] esxcli storage vmfs snapshot list
[root@esxi:~] esxcli storage filesystem list
Mount Point Volume Name UUID Mounted Type Size Free
------------------------------------------------- ------------------------------------------ ----------------------------------- ------- ------ ------------- -------------
/vmfs/volumes/68cad69a-e82d8e40-5b65-5bb7fb6107f2 datastore1 68cad69a-e82d8e40-5b65-5bb7fb6107f2 true VMFS-6 118380036096 91743059968
/vmfs/volumes/68e5b682-56352c06-7c60-04ab182311d3 snap-444b0642-nvme2tb 68e5b682-56352c06-7c60-04ab182311d3 true VMFS-6 2048162529280 1222844088320
/vmfs/volumes/68cad69a-d23fb18e-73e5-5bb7fb6107f2 OSDATA-68cad69a-d23fb18e-73e5-5bb7fb6107f2 68cad69a-d23fb18e-73e5-5bb7fb6107f2 true VMFSOS 128580583424 125363552256
/vmfs/volumes/fa8a25f7-ba40ebee-45ac-f419c9f388e0 BOOTBANK1 fa8a25f7-ba40ebee-45ac-f419c9f388e0 true vfat 4293591040 4022075392
/vmfs/volumes/f43b0450-7e4d6762-c6be-52e6552cc1f8 BOOTBANK2 f43b0450-7e4d6762-c6be-52e6552cc1f8 true vfat 4293591040 4021354496
[root@esxi:~]
特に状況は変わらない
lockmodeを確認すると、そちらでもデバイスが増えていた
[root@esxi:~] esxcli storage vmfs lockmode list
Volume Name UUID Type Locking Mode ATS Compatible ATS Upgrade Modes ATS Incompatibility Reason
------------------------------------------ ----------------------------------- -------- ------------ -------------- ----------------- --------------------------
datastore1 68cad69a-e82d8e40-5b65-5bb7fb6107f2 VMFS-6 ATS+SCSI false None Device does not support ATS
snap-444b0642-nvme2tb 68e5b682-56352c06-7c60-04ab182311d3 VMFS-6 ATS+SCSI false None Device does not support ATS
OSDATA-68cad69a-d23fb18e-73e5-5bb7fb6107f2 68cad69a-d23fb18e-73e5-5bb7fb6107f2 Non-VMFS ATS+SCSI false None Device does not support ATS
[root@esxi:~]
Persisting USB NIC Bindings Option 1: Run the following ESXCLI command which will enable the driver parameter to perform a full USB bus scan during startup: esxcli system module parameters set -p "usbBusFullScanOnBootEnabled=1" -m vmkusb_nic_fling
[root@esxi:~] esxcli system module list|grep nic
vmkusb_nic_fling true true
[root@esxi:~] esxcli system module list|grep usb
vmkusb_nic_fling true true
[root@esxi:~]
モジュール vmkusb_nic_fling は、ESXi 8.0でも存在している。
モジュールに対して設定できるパラメータを確認。
[root@esxi:~] esxcli system module parameters list -m vmkusb_nic_fling
Name Type Value Description
--------------------------- ------ ----- -----------
usbBusFullScanOnBootEnabled int Enable USB Bus full scan on system boot: 0 No (Default), 1 Yes
usbCdromPassthroughEnabled int Enable USB CDROM device for USB passtrough: 0 No (Default), 1 Yes
usbStorageRegisterDelaySecs int Delay to register cached USB storage device: Min: 0 second, Max: 600 seconds, Default: 10 seconds
vusb0_mac string Persist vusb0 MAC Address: xx:xx:xx:xx:xx:xx
vusb10_mac string Persist vusb10 MAC Address: xx:xx:xx:xx:xx:xx
vusb11_mac string Persist vusb11 MAC Address: xx:xx:xx:xx:xx:xx
vusb1_mac string Persist vusb1 MAC Address: xx:xx:xx:xx:xx:xx
vusb2_mac string Persist vusb2 MAC Address: xx:xx:xx:xx:xx:xx
vusb3_mac string Persist vusb3 MAC Address: xx:xx:xx:xx:xx:xx
vusb4_mac string Persist vusb4 MAC Address: xx:xx:xx:xx:xx:xx
vusb5_mac string Persist vusb5 MAC Address: xx:xx:xx:xx:xx:xx
vusb6_mac string Persist vusb6 MAC Address: xx:xx:xx:xx:xx:xx
vusb7_mac string Persist vusb7 MAC Address: xx:xx:xx:xx:xx:xx
vusb8_mac string Persist vusb8 MAC Address: xx:xx:xx:xx:xx:xx
vusb9_mac string Persist vusb9 MAC Address: xx:xx:xx:xx:xx:xx
[root@esxi:~]
usbBusFullScanOnBootEnabled が初期値0で存在していることを確認
(“Persisting VMkernel to USB NIC mappings”に記載されている複数のUSB NICがある時に、指す場所を変えてもvusbの番号が変わらないようにするための設定も引き続きある)
現段階のesxcliでの正式オプションに修正して、「esxcli system module parameters set --module=vmkusb_nic_fling --parameter-string="usbBusFullScanOnBootEnabled=1"」と実行する
[root@esxi:~] esxcli system module parameters set --module=vmkusb_nic_fling --parameter-string="usbBusFullScanOnBootEnabled=1"
[root@esxi:~] esxcli system module parameters list -m vmkusb_nic_fling
Name Type Value Description
--------------------------- ------ ----- -----------
usbBusFullScanOnBootEnabled int 1 Enable USB Bus full scan on system boot: 0 No (Default), 1 Yes
usbCdromPassthroughEnabled int Enable USB CDROM device for USB passtrough: 0 No (Default), 1 Yes
usbStorageRegisterDelaySecs int Delay to register cached USB storage device: Min: 0 second, Max: 600 seconds, Default: 10 seconds
vusb0_mac string Persist vusb0 MAC Address: xx:xx:xx:xx:xx:xx
vusb10_mac string Persist vusb10 MAC Address: xx:xx:xx:xx:xx:xx
vusb11_mac string Persist vusb11 MAC Address: xx:xx:xx:xx:xx:xx
vusb1_mac string Persist vusb1 MAC Address: xx:xx:xx:xx:xx:xx
vusb2_mac string Persist vusb2 MAC Address: xx:xx:xx:xx:xx:xx
vusb3_mac string Persist vusb3 MAC Address: xx:xx:xx:xx:xx:xx
vusb4_mac string Persist vusb4 MAC Address: xx:xx:xx:xx:xx:xx
vusb5_mac string Persist vusb5 MAC Address: xx:xx:xx:xx:xx:xx
vusb6_mac string Persist vusb6 MAC Address: xx:xx:xx:xx:xx:xx
vusb7_mac string Persist vusb7 MAC Address: xx:xx:xx:xx:xx:xx
vusb8_mac string Persist vusb8 MAC Address: xx:xx:xx:xx:xx:xx
vusb9_mac string Persist vusb9 MAC Address: xx:xx:xx:xx:xx:xx
[root@esxi:~]
[root@esxi:~] ls -l /etc/rc.local.d
total 32
-r-xr-xr-x 1 root root 378 Apr 3 2025 009.vsanwitness.sh
drwxr-xr-x 1 root root 512 Oct 3 00:25 autodeploy
-r-xr-xr-x 1 root root 2249 Apr 3 2025 backupPrevBootLogs.py
-r-xr-xr-x 1 root root 2071 Apr 3 2025 cleanupStatefulHost.py
-r-xr-xr-x 1 root root 2567 Apr 3 2025 kickstart.py
-rwxr-xr-t 1 root root 506 Apr 3 2025 local.sh
-r-xr-xr-x 1 root root 397 Apr 3 2025 psaScrub.sh
-r-xr-xr-x 1 root root 1190 Apr 3 2025 raiseConfigStoreVob.py
[root@esxi:~] cat /etc/rc.local.d/local.sh
#!/bin/sh ++group=host/vim/vmvisor/boot
# local configuration options
# Note: modify at your own risk! If you do/use anything in this
# script that is not part of a stable API (relying on files to be in
# specific places, specific tools, specific output, etc) there is a
# possibility you will end up with a broken system after patching or
# upgrading. Changes are not supported unless under direction of
# VMware support.
# Note: This script will not be run when UEFI secure boot is enabled.
exit 0
[root@esxi:~]
今回実行したesxcliのコマンド群を追加
[root@esxi:~] vi /etc/rc.local.d/local.sh
[root@esxi:~] cat /etc/rc.local.d/local.sh
#!/bin/sh ++group=host/vim/vmvisor/boot
# local configuration options
# Note: modify at your own risk! If you do/use anything in this
# script that is not part of a stable API (relying on files to be in
# specific places, specific tools, specific output, etc) there is a
# possibility you will end up with a broken system after patching or
# upgrading. Changes are not supported unless under direction of
# VMware support.
# Note: This script will not be run when UEFI secure boot is enabled.
esxcli network vswitch standard uplink add --vswitch-name=vSwitch0 --uplink-name=vusb0
esxcli network vswitch standard portgroup policy failover set --portgroup-name="Management Network" --active-uplinks=vusb0
esxcli network vswitch standard portgroup policy failover set --portgroup-name="VM Network" --active-uplinks=vusb0
exit 0
[root@esxi:~]
[root@esxi:~] date
Fri Oct 3 00:57:12 UTC 2025
[root@esxi:~] ls -ltr /bootbank/
total 261895
<略>
-rwx------ 1 root root 1797 Sep 17 16:34 boot.cfg
-rwx------ 1 root root 102 Oct 3 00:25 jumpstrt.gz
-rwx------ 1 root root 266977 Oct 3 00:31 state.tgz
[root@esxi:~]
/bootbank/state.tgz が更新されていない
[root@esxi:~] auto-backup.sh
ConfigStore has been modified since the last backup
Bootbank lock is /var/lock/bootbank/f43b0450-7e4d6762-c6be-52e6552cc1f8
INFO: Successfully claimed lock file for pid 526790
Saving current state in /bootbank
Ssh configuration synced to configstore
Creating ConfigStore Backup
Locking esx.conf
Creating archive
Unlocked esx.conf
Using key ID d27fa69c-5edc-424d-bc0f-61d7966bf4d4 to encrypt
Clock updated.
Time: 00:57:21 Date: 10/03/2025 UTC
[root@esxi:~]
auto-backup.shの実行後の状態を確認
[root@esxi:~] ls -ltr /bootbank/
total 261895
<略>
-rwx------ 1 root root 1797 Sep 17 16:34 boot.cfg
-rwx------ 1 root root 102 Oct 3 00:25 jumpstrt.gz
-rwx------ 1 root root 266974 Oct 3 00:57 state.tgz
[root@esxi:~]
VMware flingsで配布している「USB Network Native Driver for ESXi」からvmkusb_nic_fling ドライバをインストールすると、使えるUSB NICの種類が増える
[root@esxi:/vmfs/volumes/6908722d-a37ea8a3-525a-4d150daf152f/iso] esxcli software vib install -d /vmfs/volumes/datastore1/iso/ESXi8
03-VMKUSB-NIC-FLING-76444229-component-24179899.zip
Installation Result
Message: The update completed successfully, but the system needs to be rebooted for the changes to be effective.
VIBs Installed: VMW_bootbank_vmkusb-nic-fling_1.14-2vmw.803.0.0.76444229
VIBs Removed:
VIBs Skipped:
Reboot Required: true
DPU Results:
[root@esxi:/vmfs/volumes/6908722d-a37ea8a3-525a-4d150daf152f/iso]