This repository was archived by the owner on Mar 26, 2020. It is now read-only.

Commit 14fc355

Merge branch 'master' of https://github.com/gluster/glusterd2 into cluster-wide-options

2 parents: 6dde8aa + 18c0b00

18 files changed: +533 -57 lines

e2e/brickmux_test.go

Lines changed: 248 additions & 4 deletions
@@ -42,6 +42,7 @@ func TestBrickMux(t *testing.T) {
 	}
 
 	volname1 := formatVolName(t.Name())
+	volname2 := volname1 + strconv.Itoa(2)
 
 	createReq := api.VolCreateReq{
 		Name: volname1,
@@ -84,8 +85,6 @@ func TestBrickMux(t *testing.T) {
 		brickPaths = append(brickPaths, brickPath)
 	}
 
-	volname2 := volname1 + "2"
-
 	createReq = api.VolCreateReq{
 		Name: volname2,
 		Subvols: []api.SubvolReq{
@@ -165,7 +164,7 @@ func TestBrickMux(t *testing.T) {
 	r.Nil(client.VolumeStop(volname2))
 
 	voloptReq := api.VolOptionReq{
-		Options: map[string]string{"write-behind.trickling-writes": "on"},
+		Options: map[string]string{"io-stats.count-fop-hits": "on"},
 	}
 	voloptReq.AllowAdvanced = true
 	err = client.VolumeSet(volname2, voloptReq)
@@ -244,7 +243,7 @@ func TestBrickMux(t *testing.T) {
 	}
 
 	// Stop glusterd2 instance and kill the glusterfsd into
-	// whcih all bricks were multiplexed
+	// which all bricks were multiplexed
 	r.Nil(tc.gds[0].Stop())
 	process, err = os.FindProcess(pid)
 	r.Nil(err, fmt.Sprintf("failed to find brick pid: %s", err))
@@ -276,5 +275,250 @@ func TestBrickMux(t *testing.T) {
 		r.Nil(client.VolumeStop(volname1 + strconv.Itoa(i)))
 		r.Nil(client.VolumeDelete(volname1 + strconv.Itoa(i)))
 	}
+
+	// Turn on the brick mux max-bricks-per-process cluster option
+	optReq = api.ClusterOptionReq{
+		Options: map[string]string{"cluster.max-bricks-per-process": "5"},
+	}
+	err = client.ClusterOptionSet(optReq)
+	r.Nil(err)
+
+	for i := 36; i <= 100; i++ {
+		brickPath := testTempDir(t, "brick")
+		brickPaths = append(brickPaths, brickPath)
+	}
+
+	// Test the max-bricks-per-process constraint while multiplexing.
+
+	// Create 10 volumes and start all of them, so that all bricks are
+	// multiplexed under the constraint max-bricks-per-process = 5.
+	index = 37
+	for i := 1; i <= 10; i++ {
+
+		createReq := api.VolCreateReq{
+			Name: volname1 + strconv.Itoa(i),
+			Subvols: []api.SubvolReq{
+				{
+					Type: "distribute",
+					Bricks: []api.BrickReq{
+						{PeerID: tc.gds[0].PeerID(), Path: brickPaths[index]},
+						{PeerID: tc.gds[0].PeerID(), Path: brickPaths[index+1]},
+					},
+				},
+			},
+			Force: true,
+		}
+
+		if i%2 != 0 {
+			createReq = api.VolCreateReq{
+				Name: volname1 + strconv.Itoa(i),
+				Subvols: []api.SubvolReq{
+					{
+						ReplicaCount: 2,
+						Type: "replicate",
+						Bricks: []api.BrickReq{
+							{PeerID: tc.gds[0].PeerID(), Path: brickPaths[index]},
+							{PeerID: tc.gds[0].PeerID(), Path: brickPaths[index+1]},
+						},
+					},
+				},
+				Force: true,
+			}
+		}
+		_, err = client.VolumeCreate(createReq)
+		r.Nil(err)
+		// start the volume
+		err = client.VolumeStart(volname1+strconv.Itoa(i), false)
+		r.Nil(err)
+
+		index = index + 2
+	}
+
+	// pidMap and portMap maintain a count of every brick pid and port
+	// across all 10 volumes.
+	pidMap := make(map[int]int)
+	portMap := make(map[int]int)
+	for i := 1; i <= 10; i++ {
+		bstatus, err := client.BricksStatus(volname1 + strconv.Itoa(i))
+		r.Nil(err)
+		for _, b := range bstatus {
+			if _, ok := pidMap[b.Pid]; ok {
+				pidMap[b.Pid]++
+			} else {
+				pidMap[b.Pid] = 1
+			}
+
+			if _, ok := portMap[b.Port]; ok {
+				portMap[b.Port]++
+			} else {
+				portMap[b.Port] = 1
+			}
+		}
+	}
+
+	// Check that every pid and port has a count of 5, matching
+	// max-bricks-per-process.
+	for _, v := range pidMap {
+		r.Equal(v, 5)
+	}
+
+	for _, v := range portMap {
+		r.Equal(v, 5)
+	}
+
+	r.Nil(gd.Stop())
+	for k := range pidMap {
+		process, err := os.FindProcess(k)
+		r.Nil(err, fmt.Sprintf("failed to find brick pid: %s", err))
+		err = process.Signal(syscall.Signal(15))
+		r.Nil(err, fmt.Sprintf("failed to kill brick: %s", err))
+	}
+
+	// Spawn glusterd2 instance
+	gd, err = spawnGlusterd(t, "./config/1.toml", false)
+	r.Nil(err)
+	r.True(gd.IsRunning())
+
+	time.Sleep(10 * time.Millisecond)
+
+	pidMap = make(map[int]int)
+	portMap = make(map[int]int)
+	for i := 1; i <= 10; i++ {
+		bstatus, err := client.BricksStatus(volname1 + strconv.Itoa(i))
+		r.Nil(err)
+		for _, b := range bstatus {
+			if _, ok := pidMap[b.Pid]; ok {
+				pidMap[b.Pid]++
+			} else {
+				pidMap[b.Pid] = 1
+			}
+
+			if _, ok := portMap[b.Port]; ok {
+				portMap[b.Port]++
+			} else {
+				portMap[b.Port] = 1
+			}
+		}
+	}
+
+	// Check that every pid and port again has a count of 5, matching
+	// max-bricks-per-process.
+	for _, v := range pidMap {
+		r.Equal(v, 5)
+	}
+
+	for _, v := range portMap {
+		r.Equal(v, 5)
+	}
+
+	for i := 1; i <= 10; i++ {
+		r.Nil(client.VolumeStop(volname1 + strconv.Itoa(i)))
+		r.Nil(client.VolumeDelete(volname1 + strconv.Itoa(i)))
+	}
+
+	// Create two volumes with different options, so that bricks from these
+	// two volumes are multiplexed only into bricks of their own volume.
+	// Also check that, among the three bricks of a volume, two bricks share
+	// the same pid and port while one brick has a different pid and port,
+	// since the number of bricks is 3 and max-bricks-per-process is set to 2.
+
+	// Turn on brick mux cluster option
+	optReq = api.ClusterOptionReq{
+		Options: map[string]string{"cluster.max-bricks-per-process": "2"},
+	}
+	err = client.ClusterOptionSet(optReq)
+	r.Nil(err)
+
+	createReq = api.VolCreateReq{
+		Name: volname1,
+		Subvols: []api.SubvolReq{
+			{
+				ReplicaCount: 3,
+				Type: "replicate",
+				Bricks: []api.BrickReq{
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[51]},
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[52]},
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[53]},
+				},
+			},
+		},
+		Force: true,
+	}
+	_, err = client.VolumeCreate(createReq)
+	r.Nil(err)
+
+	// start the volume
+	err = client.VolumeStart(volname1, false)
+	r.Nil(err)
+
+	createReq = api.VolCreateReq{
+		Name: volname2,
+		Subvols: []api.SubvolReq{
+			{
+				Type: "distribute",
+				Bricks: []api.BrickReq{
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[48]},
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[49]},
+					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[50]},
+				},
+			},
+		},
+		Force: true,
+	}
+	_, err = client.VolumeCreate(createReq)
+	r.Nil(err)
+
+	// Set an option on the second volume so that it does not multiplex
+	// its bricks into the first volume's brick process.
+	var optionReq api.VolOptionReq
+	optionReq.Options = map[string]string{"io-stats.count-fop-hits": "on"}
+	optionReq.AllowAdvanced = true
+
+	r.Nil(client.VolumeSet(volname2, optionReq))
+
+	// start the volume
+	err = client.VolumeStart(volname2, false)
+	r.Nil(err)
+
+	bstatus, err = client.BricksStatus(volname1)
+	r.Nil(err)
+
+	// Keep track of the unique pids and ports used in multiplexing the
+	// bricks of volname1; the length of each map should be 2.
+	pidMap = make(map[int]int)
+	portMap = make(map[int]int)
+	for _, b := range bstatus {
+		pidMap[b.Pid] = 1
+		portMap[b.Port] = 1
+	}
+	r.Equal(len(pidMap), 2)
+	r.Equal(len(portMap), 2)
+
+	bstatus2, err := client.BricksStatus(volname2)
+	r.Nil(err)
+
+	// Keep track of the unique pids and ports used in multiplexing the
+	// bricks of volname2; the length of each map should be 2.
+	pidMap = make(map[int]int)
+	portMap = make(map[int]int)
+	for _, b := range bstatus2 {
+		pidMap[b.Pid] = 1
+		portMap[b.Port] = 1
+	}
+	r.Equal(len(pidMap), 2)
+	r.Equal(len(portMap), 2)
+
+	r.Nil(client.VolumeStop(volname1))
+	r.Nil(client.VolumeDelete(volname1))
+
+	r.Nil(client.VolumeStop(volname2))
+	r.Nil(client.VolumeDelete(volname2))
+
 	r.Nil(gd.Stop())
 }
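
The counts asserted in the added test follow from simple arithmetic on cluster.max-bricks-per-process: 20 compatible bricks capped at 5 bricks per process collapse into 4 brick processes (so each pid and port appears 5 times in the brick status), and a 3-brick volume capped at 2 bricks per process needs 2 processes (2 unique pids and ports). The standalone sketch below makes that arithmetic explicit; expectedBrickProcesses is an illustrative helper written for this note, not part of glusterd2, and it assumes all bricks considered are compatible for multiplexing.

package main

import "fmt"

// expectedBrickProcesses returns how many brick (glusterfsd) processes a set
// of bricks should collapse into when brick multiplexing is enabled and
// cluster.max-bricks-per-process is set to maxPerProc. A value <= 0 is
// treated here as "no limit", i.e. everything multiplexes into one process.
func expectedBrickProcesses(brickCount, maxPerProc int) int {
	if maxPerProc <= 0 {
		return 1
	}
	// Ceiling division: each process holds at most maxPerProc bricks.
	return (brickCount + maxPerProc - 1) / maxPerProc
}

func main() {
	// Replica-3 volume with max-bricks-per-process=2, as in the test above:
	// two bricks share one process, the third gets its own.
	fmt.Println(expectedBrickProcesses(3, 2)) // 2

	// 10 two-brick volumes (20 bricks) with max-bricks-per-process=5:
	// 4 processes, so each pid/port shows up 5 times in the brick status.
	fmt.Println(expectedBrickProcesses(20, 5)) // 4
}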

glustercli/cmd/cluster-options.go

Lines changed: 2 additions & 6 deletions
@@ -57,7 +57,7 @@ func clusterSetCmdRun(cmd *cobra.Command, args []string) {
 		}
 		failure("Cluster option set failed", err, 1)
 	} else {
-		fmt.Printf("Options set successfully \n")
+		fmt.Println("Options set successfully")
 	}
 }
 
@@ -71,16 +71,12 @@ func clusterOptionJSONHandler(cmd *cobra.Command, options []string) error {
 
 	err := client.ClusterOptionSet(api.ClusterOptionReq{
 		Options: copt})
-	if err != nil {
-		return err
-	}
-	return nil
+	return err
 }
 
 var clusterOptionGetCmd = &cobra.Command{
 	Use: "get",
 	Short: helpClusterOptionGetCmd,
-	Args: cobra.RangeArgs(0, 1),
 	Run: func(cmd *cobra.Command, args []string) {
 		table := tablewriter.NewWriter(os.Stdout)
 		opts, err := client.GetClusterOption()
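
The clusterOptionJSONHandler change is a standard Go simplification: when nothing happens between checking an error and returning it, "return err" is equivalent to "if err != nil { return err }" followed by "return nil". A minimal sketch of the idiom, using a stand-in setter function rather than the real restclient API:

package main

import (
	"errors"
	"fmt"
)

// setClusterOption mirrors the post-commit shape of clusterOptionJSONHandler:
// the error from the client call is returned directly instead of going through
// "if err != nil { return err }; return nil". The set parameter stands in for
// client.ClusterOptionSet and is purely illustrative.
func setClusterOption(set func(map[string]string) error, opts map[string]string) error {
	return set(opts)
}

func main() {
	ok := func(map[string]string) error { return nil }
	fail := func(map[string]string) error { return errors.New("cluster option set failed") }

	fmt.Println(setClusterOption(ok, map[string]string{"cluster.max-bricks-per-process": "2"}))   // <nil>
	fmt.Println(setClusterOption(fail, map[string]string{"cluster.max-bricks-per-process": "2"})) // cluster option set failed
}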
