blob: 366bf20a31247b45924e0c6f2e68047026445de3 [file] [log] [blame]
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
16
17package workqueue
18
19import (
20 "context"
21 "sync"
22
23 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
24)
25
// DoWorkPieceFunc is the callback invoked by ParallelizeUntil once for each
// piece index in [0, pieces).
type DoWorkPieceFunc func(piece int)
27
// options holds the tunable settings for ParallelizeUntil, populated by
// applying the caller-supplied Options functions.
type options struct {
	// chunkSize is how many consecutive pieces a worker takes per channel
	// receive; ParallelizeUntil treats values below 1 as 1.
	chunkSize int
}
31
// Options is a functional option that mutates the internal options struct;
// instances (e.g. from WithChunkSize) are passed variadically to
// ParallelizeUntil.
type Options func(*options)
33
34// WithChunkSize allows to set chunks of work items to the workers, rather than
35// processing one by one.
36// It is recommended to use this option if the number of pieces significantly
37// higher than the number of workers and the work done for each item is small.
38func WithChunkSize(c int) func(*options) {
39 return func(o *options) {
40 o.chunkSize = c
41 }
42}
43
44// ParallelizeUntil is a framework that allows for parallelizing N
45// independent pieces of work until done or the context is canceled.
46func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
47 if pieces == 0 {
48 return
49 }
50 o := options{}
51 for _, opt := range opts {
52 opt(&o)
53 }
54 chunkSize := o.chunkSize
55 if chunkSize < 1 {
56 chunkSize = 1
57 }
58
59 chunks := ceilDiv(pieces, chunkSize)
60 toProcess := make(chan int, chunks)
61 for i := 0; i < chunks; i++ {
62 toProcess <- i
63 }
64 close(toProcess)
65
66 var stop <-chan struct{}
67 if ctx != nil {
68 stop = ctx.Done()
69 }
70 if chunks < workers {
71 workers = chunks
72 }
73 wg := sync.WaitGroup{}
74 wg.Add(workers)
75 for i := 0; i < workers; i++ {
76 go func() {
77 defer utilruntime.HandleCrash()
78 defer wg.Done()
79 for chunk := range toProcess {
80 start := chunk * chunkSize
81 end := start + chunkSize
82 if end > pieces {
83 end = pieces
84 }
85 for p := start; p < end; p++ {
86 select {
87 case <-stop:
88 return
89 default:
90 doWorkPiece(p)
91 }
92 }
93 }
94 }()
95 }
96 wg.Wait()
97}
98
// ceilDiv returns ceil(n/d) for positive d — i.e. how many chunks of size d
// are needed to cover n pieces.
func ceilDiv(n, d int) int {
	return (n + d - 1) / d
}