Zare NAS Servers: Difference between revisions
From dtype.org
(→zare4)
Line 14:
ARRAY /dev/md/1 metadata=1.2 UUID=99f19493:fcd2742f:67f4594d:1d266851 name=zare4:1
ARRAY /dev/md/2 metadata=1.2 UUID=5496c365:497b6097:2dfe14ca:a83c0836 name=zare4:2
=== /proc/mdstat ===
root@zare4:/etc# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [linear] [multipath] [raid0] [raid1] [raid10]
md0 : active raid6 sdc[2] sde[1] sdb[0] sdd[3] sdh[6] sdi[7] sdg[5] sdf[4]
      17581592448 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md2 : active raid6 sdy[7] sdr[0] sdu[3] sdw[5] sdv[4] sdx[6] sds[1] sdt[2]
      35162348160 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md1 : active raid6 sdq[7] sdm[3] sdk[1] sdo[6] sdp[5] sdl[2] sdn[4] sdj[0]
      17581592448 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
== zare5 ==
Revision as of 20:42, 30 April 2017
I put together storage for video for Cathedral Of Faith, where I also act as a volunteer camera operator.
zare4
mdadm.conf
# mdadm.conf
CREATE owner=root group=disk mode=0660 auto=yes
HOMEHOST <system>
MAILADDR root
ARRAY /dev/md/0 metadata=1.2 UUID=7290e777:ec536a62:2c14867c:65f03fcb name=zare4:0
ARRAY /dev/md/1 metadata=1.2 UUID=99f19493:fcd2742f:67f4594d:1d266851 name=zare4:1
ARRAY /dev/md/2 metadata=1.2 UUID=5496c365:497b6097:2dfe14ca:a83c0836 name=zare4:2
/proc/mdstat
root@zare4:/etc# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [linear] [multipath] [raid0] [raid1] [raid10]
md0 : active raid6 sdc[2] sde[1] sdb[0] sdd[3] sdh[6] sdi[7] sdg[5] sdf[4]
      17581592448 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md2 : active raid6 sdy[7] sdr[0] sdu[3] sdw[5] sdv[4] sdx[6] sds[1] sdt[2]
      35162348160 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md1 : active raid6 sdq[7] sdm[3] sdk[1] sdo[6] sdp[5] sdl[2] sdn[4] sdj[0]
      17581592448 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
zare5
Creation of md2, fs3
root@zare5:~# mdadm --create /dev/md2 --chunk=64 --level=6 --raid-devices=8 /dev/sdr /dev/sds /dev/sdt /dev/sdu /dev/sdv /dev/sdw /dev/sdx /dev/sdy
root@zare5:~# mkfs.xfs -d sunit=128,swidth=768 -f /dev/md2
meta-data=/dev/md2               isize=512    agcount=55, agsize=268435440 blks
         =                       sectsz=4096  attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0
data     =                       bsize=4096   blocks=14649458688, imaxpct=1
         =                       sunit=16     swidth=96 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=521728, version=2
         =                       sectsz=4096  sunit=1 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
mdadm.conf
root@zare5:/etc# cat mdadm/mdadm.conf
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# instruct the monitoring daemon where to send mail alerts
MAILADDR root
# definitions of existing MD arrays
ARRAY /dev/md/0 metadata=1.2 UUID=b7d27941:c794ebe6:8ee17e49:36839b35 name=zare5:0
ARRAY /dev/md/1 metadata=1.2 UUID=d78ab42d:0309f981:a1c345e7:77a699ca name=zare5:1
ARRAY /dev/md2 metadata=1.2 name=zare5:2 UUID=61ed4956:6127018f:ea4d9426:f7a06dd6
mdstat
root@zare5:~# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [linear] [multipath] [raid0] [raid1] [raid10]
md0 : active raid6 sdf[4] sdd[2] sde[3] sdc[8] sdi[7] sdb[0] sdg[9] sdh[6]
      23441324160 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md1 : active raid6 sdj[0] sdm[3] sdq[7] sdk[8] sdl[2] sdo[4] sdp[6] sdn[9]
      23441324160 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
unused devices: <none>
/etc/samba/smb.conf
[global]
   workgroup = WORKGROUP
   server string = zare5 server
   dns proxy = no
   log file = /var/log/samba.log
   max log size = 10000
   syslog = 0
   panic action = /usr/share/samba/panic-action %d
   encrypt passwords = true
   passdb backend = tdbsam
   obey pam restrictions = yes
   unix password sync = yes
   passwd program = /usr/bin/passwd %u
   passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* .
   map to guest = bad user
   usershare allow guests = yes

[zshare5a-write]
   path = /opt/fs1/
   read only = No
   guest ok = No
   create mask = 0664
   directory mask = 0775

[zshare5a-read]
   path = /opt/fs1/
   force user = samba-anon
   force group = samba-anon
   read only = Yes
   guest ok = Yes
   browseable = Yes

[zshare5b-write]
   path = /opt/fs2/
   read only = No
   guest ok = No
   create mask = 0664
   directory mask = 0775

[zshare5b-read]
   path = /opt/fs2/
   force user = samba-anon
   force group = samba-anon
   read only = Yes
   guest ok = Yes
   browseable = Yes

[zshare5c-write]
   path = /opt/fs3/
   read only = No
   guest ok = No
   create mask = 0664
   directory mask = 0775

[zshare5c-read]
   path = /opt/fs3/
   force user = samba-anon
   force group = samba-anon
   read only = Yes
   guest ok = Yes
   browseable = Yes