hexsha stringlengths 40 40 | size int64 61 981k | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 9 192 | max_stars_repo_name stringlengths 4 78 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses sequencelengths 1 6 | max_stars_count int64 1 107k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 9 192 | max_issues_repo_name stringlengths 4 76 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses sequencelengths 1 6 | max_issues_count int64 1 98.3k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 9 192 | max_forks_repo_name stringlengths 4 76 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses sequencelengths 1 6 | max_forks_count int64 1 36.6k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 61 981k | avg_line_length float64 6.25 4.6k | max_line_length int64 25 142k | alphanum_fraction float64 0.13 0.99 | test_functions sequencelengths 1 2.19k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f700249a3e7dad9df09c48c5e4f9cc2c1e4a60dc | 6,115 | go | Go | vendor/github.com/hyperhq/hypercli/api/client/formatter/custom_test.go | sozercan/virtual-kubelet | f3ebde2533f7854698df8a298386154d423854e2 | [
"MIT"
] | 192 | 2016-04-06T15:42:41.000Z | 2021-11-09T09:34:49.000Z | vendor/github.com/hyperhq/hypercli/api/client/formatter/custom_test.go | tjfontaine/virtual-kubelet | 5286869440706ff178f75c270635be369fae87fe | [
"Apache-2.0"
] | 146 | 2016-04-06T15:46:05.000Z | 2020-05-06T08:24:35.000Z | vendor/github.com/hyperhq/hypercli/api/client/formatter/custom_test.go | tjfontaine/virtual-kubelet | 5286869440706ff178f75c270635be369fae87fe | [
"Apache-2.0"
] | 54 | 2016-04-05T23:40:20.000Z | 2021-01-07T13:19:53.000Z | package formatter
import (
"reflect"
"strings"
"testing"
"time"
"github.com/hyperhq/hyper-api/types"
"github.com/hyperhq/hypercli/pkg/stringid"
)
func TestContainerPsContext(t *testing.T) {
containerID := stringid.GenerateRandomID()
unix := time.Now().Unix()
var ctx containerContext
cases := []struct {
container types.Container
trunc bool
expValue string
expHeader string
call func() string
}{
{types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID},
{types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID},
{types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names},
{types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image},
{types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image},
{types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image},
{types.Container{
Image: "a5a665ff33eced1e0803148700880edab4",
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
},
true,
"a5a665ff33ec",
imageHeader,
ctx.Image,
},
{types.Container{
Image: "a5a665ff33eced1e0803148700880edab4",
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
},
false,
"a5a665ff33eced1e0803148700880edab4",
imageHeader,
ctx.Image,
},
{types.Container{Image: ""}, true, "<no image>", imageHeader, ctx.Image},
{types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command},
{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports},
{types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status},
{types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size},
{types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size},
{types.Container{}, true, "", labelsHeader, ctx.Labels},
{types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels},
{types.Container{Created: unix}, true, "Less than a second", runningForHeader, ctx.RunningFor},
}
for _, c := range cases {
ctx = containerContext{c: c.container, trunc: c.trunc}
v := c.call()
if strings.Contains(v, ",") {
compareMultipleValues(t, v, c.expValue)
} else if v != c.expValue {
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
}
h := ctx.fullHeader()
if h != c.expHeader {
t.Fatalf("Expected %s, was %s\n", c.expHeader, h)
}
}
c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}}
ctx = containerContext{c: c1, trunc: true}
sid := ctx.Label("com.docker.swarm.swarm-id")
node := ctx.Label("com.docker.swarm.node_name")
if sid != "33" {
t.Fatalf("Expected 33, was %s\n", sid)
}
if node != "ubuntu" {
t.Fatalf("Expected ubuntu, was %s\n", node)
}
h := ctx.fullHeader()
if h != "SWARM ID\tNODE NAME" {
t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h)
}
c2 := types.Container{}
ctx = containerContext{c: c2, trunc: true}
label := ctx.Label("anything.really")
if label != "" {
t.Fatalf("Expected an empty string, was %s", label)
}
ctx = containerContext{c: c2, trunc: true}
fullHeader := ctx.fullHeader()
if fullHeader != "" {
t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader)
}
}
func TestImagesContext(t *testing.T) {
imageID := stringid.GenerateRandomID()
unix := time.Now().Unix()
var ctx imageContext
cases := []struct {
imageCtx imageContext
expValue string
expHeader string
call func() string
}{
{imageContext{
i: types.Image{ID: imageID},
trunc: true,
}, stringid.TruncateID(imageID), imageIDHeader, ctx.ID},
{imageContext{
i: types.Image{ID: imageID},
trunc: false,
}, imageID, imageIDHeader, ctx.ID},
{imageContext{
i: types.Image{Size: 10},
trunc: true,
}, "10 B", sizeHeader, ctx.Size},
{imageContext{
i: types.Image{Created: unix},
trunc: true,
}, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
// FIXME
// {imageContext{
// i: types.Image{Created: unix},
// trunc: true,
// }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince},
{imageContext{
i: types.Image{},
repo: "busybox",
}, "busybox", repositoryHeader, ctx.Repository},
{imageContext{
i: types.Image{},
tag: "latest",
}, "latest", tagHeader, ctx.Tag},
{imageContext{
i: types.Image{},
digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a",
}, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest},
}
for _, c := range cases {
ctx = c.imageCtx
v := c.call()
if strings.Contains(v, ",") {
compareMultipleValues(t, v, c.expValue)
} else if v != c.expValue {
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
}
h := ctx.fullHeader()
if h != c.expHeader {
t.Fatalf("Expected %s, was %s\n", c.expHeader, h)
}
}
}
func compareMultipleValues(t *testing.T, value, expected string) {
// comma-separated values means probably a map input, which won't
// be guaranteed to have the same order as our expected value
// We'll create maps and use reflect.DeepEquals to check instead:
entriesMap := make(map[string]string)
expMap := make(map[string]string)
entries := strings.Split(value, ",")
expectedEntries := strings.Split(expected, ",")
for _, entry := range entries {
keyval := strings.Split(entry, "=")
entriesMap[keyval[0]] = keyval[1]
}
for _, expected := range expectedEntries {
keyval := strings.Split(expected, "=")
expMap[keyval[0]] = keyval[1]
}
if !reflect.DeepEqual(expMap, entriesMap) {
t.Fatalf("Expected entries: %v, got: %v", expected, value)
}
}
| 31.683938 | 135 | 0.673426 | [
"func TestContainerPsContext(t *testing.T) {\n\tcontainerID := stringid.GenerateRandomID()\n\tunix := time.Now().Unix()\n\n\tvar ctx containerContext\n\tcases := []struct {\n\t\tcontainer types.Container\n\t\ttrunc bool\n\t\texpValue string\n\t\texpHeader string\n\t\tcall func() string\n\t}{\n\t\t{types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID},\n\t\t{types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID},\n\t\t{types.Container{Names: []string{\"/foobar_baz\"}}, true, \"foobar_baz\", namesHeader, ctx.Names},\n\t\t{types.Container{Image: \"ubuntu\"}, true, \"ubuntu\", imageHeader, ctx.Image},\n\t\t{types.Container{Image: \"verylongimagename\"}, true, \"verylongimagename\", imageHeader, ctx.Image},\n\t\t{types.Container{Image: \"verylongimagename\"}, false, \"verylongimagename\", imageHeader, ctx.Image},\n\t\t{types.Container{\n\t\t\tImage: \"a5a665ff33eced1e0803148700880edab4\",\n\t\t\tImageID: \"a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5\",\n\t\t},\n\t\t\ttrue,\n\t\t\t\"a5a665ff33ec\",\n\t\t\timageHeader,\n\t\t\tctx.Image,\n\t\t},\n\t\t{types.Container{\n\t\t\tImage: \"a5a665ff33eced1e0803148700880edab4\",\n\t\t\tImageID: \"a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5\",\n\t\t},\n\t\t\tfalse,\n\t\t\t\"a5a665ff33eced1e0803148700880edab4\",\n\t\t\timageHeader,\n\t\t\tctx.Image,\n\t\t},\n\t\t{types.Container{Image: \"\"}, true, \"<no image>\", imageHeader, ctx.Image},\n\t\t{types.Container{Command: \"sh -c 'ls -la'\"}, true, `\"sh -c 'ls -la'\"`, commandHeader, ctx.Command},\n\t\t{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},\n\t\t{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: \"tcp\"}}}, true, \"8080/tcp\", portsHeader, ctx.Ports},\n\t\t{types.Container{Status: \"RUNNING\"}, true, \"RUNNING\", statusHeader, ctx.Status},\n\t\t{types.Container{SizeRw: 10}, true, 
\"10 B\", sizeHeader, ctx.Size},\n\t\t{types.Container{SizeRw: 10, SizeRootFs: 20}, true, \"10 B (virtual 20 B)\", sizeHeader, ctx.Size},\n\t\t{types.Container{}, true, \"\", labelsHeader, ctx.Labels},\n\t\t{types.Container{Labels: map[string]string{\"cpu\": \"6\", \"storage\": \"ssd\"}}, true, \"cpu=6,storage=ssd\", labelsHeader, ctx.Labels},\n\t\t{types.Container{Created: unix}, true, \"Less than a second\", runningForHeader, ctx.RunningFor},\n\t}\n\n\tfor _, c := range cases {\n\t\tctx = containerContext{c: c.container, trunc: c.trunc}\n\t\tv := c.call()\n\t\tif strings.Contains(v, \",\") {\n\t\t\tcompareMultipleValues(t, v, c.expValue)\n\t\t} else if v != c.expValue {\n\t\t\tt.Fatalf(\"Expected %s, was %s\\n\", c.expValue, v)\n\t\t}\n\n\t\th := ctx.fullHeader()\n\t\tif h != c.expHeader {\n\t\t\tt.Fatalf(\"Expected %s, was %s\\n\", c.expHeader, h)\n\t\t}\n\t}\n\n\tc1 := types.Container{Labels: map[string]string{\"com.docker.swarm.swarm-id\": \"33\", \"com.docker.swarm.node_name\": \"ubuntu\"}}\n\tctx = containerContext{c: c1, trunc: true}\n\n\tsid := ctx.Label(\"com.docker.swarm.swarm-id\")\n\tnode := ctx.Label(\"com.docker.swarm.node_name\")\n\tif sid != \"33\" {\n\t\tt.Fatalf(\"Expected 33, was %s\\n\", sid)\n\t}\n\n\tif node != \"ubuntu\" {\n\t\tt.Fatalf(\"Expected ubuntu, was %s\\n\", node)\n\t}\n\n\th := ctx.fullHeader()\n\tif h != \"SWARM ID\\tNODE NAME\" {\n\t\tt.Fatalf(\"Expected %s, was %s\\n\", \"SWARM ID\\tNODE NAME\", h)\n\n\t}\n\n\tc2 := types.Container{}\n\tctx = containerContext{c: c2, trunc: true}\n\n\tlabel := ctx.Label(\"anything.really\")\n\tif label != \"\" {\n\t\tt.Fatalf(\"Expected an empty string, was %s\", label)\n\t}\n\n\tctx = containerContext{c: c2, trunc: true}\n\tfullHeader := ctx.fullHeader()\n\tif fullHeader != \"\" {\n\t\tt.Fatalf(\"Expected fullHeader to be empty, was %s\", fullHeader)\n\t}\n\n}",
"func TestImagesContext(t *testing.T) {\n\timageID := stringid.GenerateRandomID()\n\tunix := time.Now().Unix()\n\n\tvar ctx imageContext\n\tcases := []struct {\n\t\timageCtx imageContext\n\t\texpValue string\n\t\texpHeader string\n\t\tcall func() string\n\t}{\n\t\t{imageContext{\n\t\t\ti: types.Image{ID: imageID},\n\t\t\ttrunc: true,\n\t\t}, stringid.TruncateID(imageID), imageIDHeader, ctx.ID},\n\t\t{imageContext{\n\t\t\ti: types.Image{ID: imageID},\n\t\t\ttrunc: false,\n\t\t}, imageID, imageIDHeader, ctx.ID},\n\t\t{imageContext{\n\t\t\ti: types.Image{Size: 10},\n\t\t\ttrunc: true,\n\t\t}, \"10 B\", sizeHeader, ctx.Size},\n\t\t{imageContext{\n\t\t\ti: types.Image{Created: unix},\n\t\t\ttrunc: true,\n\t\t}, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},\n\t\t// FIXME\n\t\t// {imageContext{\n\t\t// \ti: types.Image{Created: unix},\n\t\t// \ttrunc: true,\n\t\t// }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince},\n\t\t{imageContext{\n\t\t\ti: types.Image{},\n\t\t\trepo: \"busybox\",\n\t\t}, \"busybox\", repositoryHeader, ctx.Repository},\n\t\t{imageContext{\n\t\t\ti: types.Image{},\n\t\t\ttag: \"latest\",\n\t\t}, \"latest\", tagHeader, ctx.Tag},\n\t\t{imageContext{\n\t\t\ti: types.Image{},\n\t\t\tdigest: \"sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a\",\n\t\t}, \"sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a\", digestHeader, ctx.Digest},\n\t}\n\n\tfor _, c := range cases {\n\t\tctx = c.imageCtx\n\t\tv := c.call()\n\t\tif strings.Contains(v, \",\") {\n\t\t\tcompareMultipleValues(t, v, c.expValue)\n\t\t} else if v != c.expValue {\n\t\t\tt.Fatalf(\"Expected %s, was %s\\n\", c.expValue, v)\n\t\t}\n\n\t\th := ctx.fullHeader()\n\t\tif h != c.expHeader {\n\t\t\tt.Fatalf(\"Expected %s, was %s\\n\", c.expHeader, h)\n\t\t}\n\t}\n}"
] |
f7002a680f4e674bf27e31b32897a25024d1eb11 | 2,671 | go | Go | pkg/reconciler/v1alpha1/autoscaling/kpa/resources/service_test.go | TommyLike/serving | d4b62734274f62dd9f553f8923f5574f65899840 | [
"Apache-2.0"
] | null | null | null | pkg/reconciler/v1alpha1/autoscaling/kpa/resources/service_test.go | TommyLike/serving | d4b62734274f62dd9f553f8923f5574f65899840 | [
"Apache-2.0"
] | null | null | null | pkg/reconciler/v1alpha1/autoscaling/kpa/resources/service_test.go | TommyLike/serving | d4b62734274f62dd9f553f8923f5574f65899840 | [
"Apache-2.0"
] | null | null | null | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"testing"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
pav1a1 "github.com/knative/serving/pkg/apis/autoscaling/v1alpha1"
"github.com/knative/serving/pkg/apis/serving"
autoscalingv1 "k8s.io/api/autoscaling/v1"
)
var boolTrue = true
func TestMakeService(t *testing.T) {
pa := &pav1a1.PodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Namespace: "here",
Name: "with-you",
UID: "2006",
// Those labels are propagated from the Revision->KPA.
Labels: map[string]string{
serving.RevisionLabelKey: "with-you",
serving.RevisionUID: "2009",
},
Annotations: map[string]string{
"a": "b",
},
},
Spec: pav1a1.PodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "with-you",
},
ServiceName: "with-you-service",
},
}
selector := map[string]string{"cant": "stop"}
want := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: "here",
Name: "with-you-metrics",
Labels: map[string]string{
// Those should be propagated.
serving.RevisionLabelKey: "with-you",
serving.RevisionUID: "2009",
kpaLabelKey: "with-you",
},
Annotations: map[string]string{
"a": "b",
},
OwnerReferences: []metav1.OwnerReference{{
APIVersion: pav1a1.SchemeGroupVersion.String(),
Kind: "PodAutoscaler",
Name: "with-you",
UID: "2006",
Controller: &boolTrue,
BlockOwnerDeletion: &boolTrue,
}},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
Port: 9090,
TargetPort: intstr.FromString("queue-metrics"),
}},
Selector: selector,
},
}
got := MakeMetricsService(pa, selector)
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("Metrics K8s Service mismatch (-want, +got) = %v", diff)
}
}
| 28.72043 | 72 | 0.659678 | [
"func TestMakeService(t *testing.T) {\n\tpa := &pav1a1.PodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"here\",\n\t\t\tName: \"with-you\",\n\t\t\tUID: \"2006\",\n\t\t\t// Those labels are propagated from the Revision->KPA.\n\t\t\tLabels: map[string]string{\n\t\t\t\tserving.RevisionLabelKey: \"with-you\",\n\t\t\t\tserving.RevisionUID: \"2009\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"a\": \"b\",\n\t\t\t},\n\t\t},\n\t\tSpec: pav1a1.PodAutoscalerSpec{\n\t\t\tScaleTargetRef: autoscalingv1.CrossVersionObjectReference{\n\t\t\t\tAPIVersion: \"apps/v1\",\n\t\t\t\tKind: \"Deployment\",\n\t\t\t\tName: \"with-you\",\n\t\t\t},\n\t\t\tServiceName: \"with-you-service\",\n\t\t},\n\t}\n\tselector := map[string]string{\"cant\": \"stop\"}\n\twant := &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"here\",\n\t\t\tName: \"with-you-metrics\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t// Those should be propagated.\n\t\t\t\tserving.RevisionLabelKey: \"with-you\",\n\t\t\t\tserving.RevisionUID: \"2009\",\n\t\t\t\tkpaLabelKey: \"with-you\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"a\": \"b\",\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\tAPIVersion: pav1a1.SchemeGroupVersion.String(),\n\t\t\t\tKind: \"PodAutoscaler\",\n\t\t\t\tName: \"with-you\",\n\t\t\t\tUID: \"2006\",\n\t\t\t\tController: &boolTrue,\n\t\t\t\tBlockOwnerDeletion: &boolTrue,\n\t\t\t}},\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\tName: \"metrics\",\n\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\tPort: 9090,\n\t\t\t\tTargetPort: intstr.FromString(\"queue-metrics\"),\n\t\t\t}},\n\t\t\tSelector: selector,\n\t\t},\n\t}\n\tgot := MakeMetricsService(pa, selector)\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"Metrics K8s Service mismatch (-want, +got) = %v\", diff)\n\t}\n}"
] |
f70036544af62e3a238cc85a71a32c3450e32147 | 4,590 | go | Go | encoding/json/register_test.go | nishakhater/yarpc-go | dbbaeda2c7a6097c2c787c6967e6980ec8cde6a2 | [
"MIT"
] | null | null | null | encoding/json/register_test.go | nishakhater/yarpc-go | dbbaeda2c7a6097c2c787c6967e6980ec8cde6a2 | [
"MIT"
] | null | null | null | encoding/json/register_test.go | nishakhater/yarpc-go | dbbaeda2c7a6097c2c787c6967e6980ec8cde6a2 | [
"MIT"
] | null | null | null | // Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package json
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestWrapUnaryHandlerInvalid(t *testing.T) {
tests := []struct {
Name string
Func interface{}
}{
{"empty", func() {}},
{"not-a-function", 0},
{
"wrong-args-in",
func(context.Context) (*struct{}, error) {
return nil, nil
},
},
{
"wrong-ctx",
func(string, *struct{}) (*struct{}, error) {
return nil, nil
},
},
{
"wrong-req-body",
func(context.Context, string, int) (*struct{}, error) {
return nil, nil
},
},
{
"wrong-response",
func(context.Context, map[string]interface{}) error {
return nil
},
},
{
"non-pointer-req",
func(context.Context, struct{}) (*struct{}, error) {
return nil, nil
},
},
{
"non-pointer-res",
func(context.Context, *struct{}) (struct{}, error) {
return struct{}{}, nil
},
},
{
"non-string-key",
func(context.Context, map[int32]interface{}) (*struct{}, error) {
return nil, nil
},
},
{
"second-return-value-not-error",
func(context.Context, *struct{}) (*struct{}, *struct{}) {
return nil, nil
},
},
}
for _, tt := range tests {
assert.Panics(t, assert.PanicTestFunc(func() {
wrapUnaryHandler(tt.Name, tt.Func)
}), tt.Name)
}
}
func TestWrapUnaryHandlerValid(t *testing.T) {
tests := []struct {
Name string
Func interface{}
}{
{
"foo",
func(context.Context, *struct{}) (*struct{}, error) {
return nil, nil
},
},
{
"bar",
func(context.Context, map[string]interface{}) (*struct{}, error) {
return nil, nil
},
},
{
"baz",
func(context.Context, map[string]interface{}) (map[string]interface{}, error) {
return nil, nil
},
},
{
"qux",
func(context.Context, interface{}) (map[string]interface{}, error) {
return nil, nil
},
},
}
for _, tt := range tests {
wrapUnaryHandler(tt.Name, tt.Func)
}
}
func TestWrapOnewayHandlerInvalid(t *testing.T) {
tests := []struct {
Name string
Func interface{}
}{
{"empty", func() {}},
{"not-a-function", 0},
{
"wrong-args-in",
func(context.Context) error {
return nil
},
},
{
"wrong-ctx",
func(string, *struct{}) error {
return nil
},
},
{
"wrong-req-body",
func(context.Context, string, int) error {
return nil
},
},
{
"wrong-response",
func(context.Context, map[string]interface{}) (*struct{}, error) {
return nil, nil
},
},
{
"wrong-response-val",
func(context.Context, map[string]interface{}) int {
return 0
},
},
{
"non-pointer-req",
func(context.Context, struct{}) error {
return nil
},
},
{
"non-string-key",
func(context.Context, map[int32]interface{}) error {
return nil
},
},
}
for _, tt := range tests {
assert.Panics(t, assert.PanicTestFunc(func() {
wrapOnewayHandler(tt.Name, tt.Func)
}))
}
}
func TestWrapOnewayHandlerValid(t *testing.T) {
tests := []struct {
Name string
Func interface{}
}{
{
"foo",
func(context.Context, *struct{}) error {
return nil
},
},
{
"bar",
func(context.Context, map[string]interface{}) error {
return nil
},
},
{
"baz",
func(context.Context, map[string]interface{}) error {
return nil
},
},
{
"qux",
func(context.Context, interface{}) error {
return nil
},
},
}
for _, tt := range tests {
wrapOnewayHandler(tt.Name, tt.Func)
}
}
| 20.675676 | 82 | 0.615686 | [
"func TestWrapUnaryHandlerInvalid(t *testing.T) {\n\ttests := []struct {\n\t\tName string\n\t\tFunc interface{}\n\t}{\n\t\t{\"empty\", func() {}},\n\t\t{\"not-a-function\", 0},\n\t\t{\n\t\t\t\"wrong-args-in\",\n\t\t\tfunc(context.Context) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-ctx\",\n\t\t\tfunc(string, *struct{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-req-body\",\n\t\t\tfunc(context.Context, string, int) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-response\",\n\t\t\tfunc(context.Context, map[string]interface{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"non-pointer-req\",\n\t\t\tfunc(context.Context, struct{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"non-pointer-res\",\n\t\t\tfunc(context.Context, *struct{}) (struct{}, error) {\n\t\t\t\treturn struct{}{}, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"non-string-key\",\n\t\t\tfunc(context.Context, map[int32]interface{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"second-return-value-not-error\",\n\t\t\tfunc(context.Context, *struct{}) (*struct{}, *struct{}) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tassert.Panics(t, assert.PanicTestFunc(func() {\n\t\t\twrapUnaryHandler(tt.Name, tt.Func)\n\t\t}), tt.Name)\n\t}\n}",
"func TestWrapUnaryHandlerValid(t *testing.T) {\n\ttests := []struct {\n\t\tName string\n\t\tFunc interface{}\n\t}{\n\t\t{\n\t\t\t\"foo\",\n\t\t\tfunc(context.Context, *struct{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"bar\",\n\t\t\tfunc(context.Context, map[string]interface{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"baz\",\n\t\t\tfunc(context.Context, map[string]interface{}) (map[string]interface{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"qux\",\n\t\t\tfunc(context.Context, interface{}) (map[string]interface{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twrapUnaryHandler(tt.Name, tt.Func)\n\t}\n}",
"func TestWrapOnewayHandlerInvalid(t *testing.T) {\n\ttests := []struct {\n\t\tName string\n\t\tFunc interface{}\n\t}{\n\t\t{\"empty\", func() {}},\n\t\t{\"not-a-function\", 0},\n\t\t{\n\t\t\t\"wrong-args-in\",\n\t\t\tfunc(context.Context) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-ctx\",\n\t\t\tfunc(string, *struct{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-req-body\",\n\t\t\tfunc(context.Context, string, int) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-response\",\n\t\t\tfunc(context.Context, map[string]interface{}) (*struct{}, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"wrong-response-val\",\n\t\t\tfunc(context.Context, map[string]interface{}) int {\n\t\t\t\treturn 0\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"non-pointer-req\",\n\t\t\tfunc(context.Context, struct{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"non-string-key\",\n\t\t\tfunc(context.Context, map[int32]interface{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tassert.Panics(t, assert.PanicTestFunc(func() {\n\t\t\twrapOnewayHandler(tt.Name, tt.Func)\n\t\t}))\n\t}\n}",
"func TestWrapOnewayHandlerValid(t *testing.T) {\n\ttests := []struct {\n\t\tName string\n\t\tFunc interface{}\n\t}{\n\t\t{\n\t\t\t\"foo\",\n\t\t\tfunc(context.Context, *struct{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"bar\",\n\t\t\tfunc(context.Context, map[string]interface{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"baz\",\n\t\t\tfunc(context.Context, map[string]interface{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"qux\",\n\t\t\tfunc(context.Context, interface{}) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\twrapOnewayHandler(tt.Name, tt.Func)\n\t}\n}"
] |
f7003e92d4439a3e3aca7d846f26562a35ea34a4 | 221 | go | Go | go_agent/src/bosh/agent/applier/bundlecollection/bundlecollection_suite_test.go | cloudfoundry-community-attic/bosh-cloudstack-cpi | 279eac830413bac109d78a856334a80f85c8308d | [
"Apache-2.0"
] | 1 | 2020-09-18T11:17:07.000Z | 2020-09-18T11:17:07.000Z | go_agent/src/bosh/agent/applier/bundlecollection/bundlecollection_suite_test.go | zengxianhui/bosh-2605 | 0156f559d1c9535c6177012a085ed59c40509ac1 | [
"Ruby",
"Apache-2.0"
] | null | null | null | go_agent/src/bosh/agent/applier/bundlecollection/bundlecollection_suite_test.go | zengxianhui/bosh-2605 | 0156f559d1c9535c6177012a085ed59c40509ac1 | [
"Ruby",
"Apache-2.0"
] | 1 | 2018-08-24T20:19:00.000Z | 2018-08-24T20:19:00.000Z | package bundlecollection_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestBundlecollection(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Bundlecollection Suite")
}
| 15.785714 | 41 | 0.746606 | [
"func TestBundlecollection(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Bundlecollection Suite\")\n}"
] |
f7003f423c2a7cf8e3d5961945a425ee02a0ab28 | 6,045 | go | Go | internal/gitaly/service/objectpool/alternates_test.go | k00mi/gitaly | 666ac9ff4149cc9d8e25cadee708717958a04ef1 | [
"MIT"
] | 1 | 2021-01-20T05:40:29.000Z | 2021-01-20T05:40:29.000Z | internal/gitaly/service/objectpool/alternates_test.go | k00mi/gitaly | 666ac9ff4149cc9d8e25cadee708717958a04ef1 | [
"MIT"
] | null | null | null | internal/gitaly/service/objectpool/alternates_test.go | k00mi/gitaly | 666ac9ff4149cc9d8e25cadee708717958a04ef1 | [
"MIT"
] | null | null | null | package objectpool
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/internal/git"
"gitlab.com/gitlab-org/gitaly/internal/git/objectpool"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/config"
"gitlab.com/gitlab-org/gitaly/internal/testhelper"
"gitlab.com/gitlab-org/gitaly/proto/go/gitalypb"
)
func TestDisconnectGitAlternates(t *testing.T) {
locator := config.NewLocator(config.Config)
server, serverSocketPath := runObjectPoolServer(t, config.Config, locator)
defer server.Stop()
client, conn := newObjectPoolClient(t, serverSocketPath)
defer conn.Close()
ctx, cancel := testhelper.Context()
defer cancel()
testRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)
defer cleanupFn()
pool, err := objectpool.NewObjectPool(config.Config, locator, testRepo.GetStorageName(), testhelper.NewTestObjectPoolName(t))
require.NoError(t, err)
defer pool.Remove(ctx)
require.NoError(t, pool.Create(ctx, testRepo))
require.NoError(t, pool.Link(ctx, testRepo))
testhelper.MustRunCommand(t, nil, "git", "-C", testRepoPath, "gc")
existingObjectID := "55bc176024cfa3baaceb71db584c7e5df900ea65"
// Corrupt the repository to check that existingObjectID can no longer be found
altPath, err := locator.InfoAlternatesPath(testRepo)
require.NoError(t, err, "find info/alternates")
require.NoError(t, os.RemoveAll(altPath))
cmd, err := git.SafeCmd(ctx, testRepo, nil,
git.SubCmd{Name: "cat-file", Flags: []git.Option{git.Flag{Name: "-e"}}, Args: []string{existingObjectID}})
require.NoError(t, err)
require.Error(t, cmd.Wait(), "expect cat-file to fail because object cannot be found")
require.NoError(t, pool.Link(ctx, testRepo))
require.FileExists(t, altPath, "objects/info/alternates should be back")
// At this point we know that the repository has access to
// existingObjectID, but only if objects/info/alternates is in place.
_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})
require.NoError(t, err, "call DisconnectGitAlternates")
// Check that the object can still be found, even though
// objects/info/alternates is gone. This is the purpose of
// DisconnectGitAlternates.
testhelper.AssertPathNotExists(t, altPath)
testhelper.MustRunCommand(t, nil, "git", "-C", testRepoPath, "cat-file", "-e", existingObjectID)
}
func TestDisconnectGitAlternatesNoAlternates(t *testing.T) {
locator := config.NewLocator(config.Config)
server, serverSocketPath := runObjectPoolServer(t, config.Config, locator)
defer server.Stop()
client, conn := newObjectPoolClient(t, serverSocketPath)
defer conn.Close()
ctx, cancel := testhelper.Context()
defer cancel()
testRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)
defer cleanupFn()
altPath, err := locator.InfoAlternatesPath(testRepo)
require.NoError(t, err, "find info/alternates")
testhelper.AssertPathNotExists(t, altPath)
_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})
require.NoError(t, err, "call DisconnectGitAlternates on repository without alternates")
testhelper.MustRunCommand(t, nil, "git", "-C", testRepoPath, "fsck")
}
// TestDisconnectGitAlternatesUnexpectedAlternates checks that the RPC refuses
// to act on alternates files it does not understand (multiple entries, missing
// target, target not a directory) and leaves the file's content untouched.
func TestDisconnectGitAlternatesUnexpectedAlternates(t *testing.T) {
	locator := config.NewLocator(config.Config)
	server, serverSocketPath := runObjectPoolServer(t, config.Config, locator)
	defer server.Stop()
	client, conn := newObjectPoolClient(t, serverSocketPath)
	defer conn.Close()
	ctx, cancel := testhelper.Context()
	defer cancel()
	testCases := []struct {
		desc       string
		altContent string // content written into objects/info/alternates before the RPC
	}{
		{desc: "multiple alternates", altContent: "/foo/bar\n/qux/baz\n"},
		{desc: "directory not found", altContent: "/does/not/exist/\n"},
		{desc: "not a directory", altContent: "../HEAD\n"},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			testRepo, _, cleanupFn := testhelper.NewTestRepo(t)
			defer cleanupFn()
			altPath, err := locator.InfoAlternatesPath(testRepo)
			require.NoError(t, err, "find info/alternates")
			// Plant the malformed alternates file the RPC should reject.
			require.NoError(t, ioutil.WriteFile(altPath, []byte(tc.altContent), 0644))
			_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})
			require.Error(t, err, "call DisconnectGitAlternates on repository with unexpected objects/info/alternates")
			// On failure the alternates file must be left exactly as it was.
			contentAfterRPC, err := ioutil.ReadFile(altPath)
			require.NoError(t, err, "read back objects/info/alternates")
			require.Equal(t, tc.altContent, string(contentAfterRPC), "objects/info/alternates content should not have changed")
		})
	}
}
// TestRemoveAlternatesIfOk verifies the fsck safety net of removeAlternatesIfOk:
// when 'git fsck' fails after the alternates file has been removed, the function
// must return a fsckError, restore objects/info/alternates from the backup, and
// keep the backup file around for inspection.
func TestRemoveAlternatesIfOk(t *testing.T) {
	ctx, cancel := testhelper.Context()
	defer cancel()
	testRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)
	defer cleanupFn()
	locator := config.NewLocator(config.Config)
	altPath, err := locator.InfoAlternatesPath(testRepo)
	require.NoError(t, err, "find info/alternates")
	altContent := "/var/empty\n"
	require.NoError(t, ioutil.WriteFile(altPath, []byte(altContent), 0644), "write alternates file")
	// Intentionally break the repository, so that 'git fsck' will fail later.
	testhelper.MustRunCommand(t, nil, "sh", "-c", fmt.Sprintf("rm %s/objects/pack/*.pack", testRepoPath))
	altBackup := altPath + ".backup"
	err = removeAlternatesIfOk(ctx, testRepo, altPath, altBackup)
	require.Error(t, err, "removeAlternatesIfOk should fail")
	require.IsType(t, &fsckError{}, err, "error must be because of fsck")
	// We expect objects/info/alternates to have been restored when
	// removeAlternatesIfOk returned.
	assertAlternates(t, altPath, altContent)
	// We expect the backup alternates file to still exist.
	assertAlternates(t, altBackup, altContent)
}
// assertAlternates asserts that the file at altPath exists and contains
// exactly altContent.
func assertAlternates(t *testing.T, altPath string, altContent string) {
	onDisk, readErr := ioutil.ReadFile(altPath)
	require.NoError(t, readErr, "read %s after fsck failure", altPath)
	require.Equal(t, altContent, string(onDisk), "%s content after fsck failure", altPath)
}
| 36.197605 | 126 | 0.750868 | [
"func TestDisconnectGitAlternates(t *testing.T) {\n\tlocator := config.NewLocator(config.Config)\n\tserver, serverSocketPath := runObjectPoolServer(t, config.Config, locator)\n\tdefer server.Stop()\n\n\tclient, conn := newObjectPoolClient(t, serverSocketPath)\n\tdefer conn.Close()\n\n\tctx, cancel := testhelper.Context()\n\tdefer cancel()\n\n\ttestRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)\n\tdefer cleanupFn()\n\n\tpool, err := objectpool.NewObjectPool(config.Config, locator, testRepo.GetStorageName(), testhelper.NewTestObjectPoolName(t))\n\trequire.NoError(t, err)\n\tdefer pool.Remove(ctx)\n\n\trequire.NoError(t, pool.Create(ctx, testRepo))\n\trequire.NoError(t, pool.Link(ctx, testRepo))\n\ttesthelper.MustRunCommand(t, nil, \"git\", \"-C\", testRepoPath, \"gc\")\n\n\texistingObjectID := \"55bc176024cfa3baaceb71db584c7e5df900ea65\"\n\n\t// Corrupt the repository to check that existingObjectID can no longer be found\n\taltPath, err := locator.InfoAlternatesPath(testRepo)\n\trequire.NoError(t, err, \"find info/alternates\")\n\trequire.NoError(t, os.RemoveAll(altPath))\n\n\tcmd, err := git.SafeCmd(ctx, testRepo, nil,\n\t\tgit.SubCmd{Name: \"cat-file\", Flags: []git.Option{git.Flag{Name: \"-e\"}}, Args: []string{existingObjectID}})\n\trequire.NoError(t, err)\n\trequire.Error(t, cmd.Wait(), \"expect cat-file to fail because object cannot be found\")\n\n\trequire.NoError(t, pool.Link(ctx, testRepo))\n\trequire.FileExists(t, altPath, \"objects/info/alternates should be back\")\n\n\t// At this point we know that the repository has access to\n\t// existingObjectID, but only if objects/info/alternates is in place.\n\n\t_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})\n\trequire.NoError(t, err, \"call DisconnectGitAlternates\")\n\n\t// Check that the object can still be found, even though\n\t// objects/info/alternates is gone. 
This is the purpose of\n\t// DisconnectGitAlternates.\n\ttesthelper.AssertPathNotExists(t, altPath)\n\ttesthelper.MustRunCommand(t, nil, \"git\", \"-C\", testRepoPath, \"cat-file\", \"-e\", existingObjectID)\n}",
"func TestDisconnectGitAlternatesNoAlternates(t *testing.T) {\n\tlocator := config.NewLocator(config.Config)\n\tserver, serverSocketPath := runObjectPoolServer(t, config.Config, locator)\n\tdefer server.Stop()\n\n\tclient, conn := newObjectPoolClient(t, serverSocketPath)\n\tdefer conn.Close()\n\n\tctx, cancel := testhelper.Context()\n\tdefer cancel()\n\n\ttestRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)\n\tdefer cleanupFn()\n\n\taltPath, err := locator.InfoAlternatesPath(testRepo)\n\trequire.NoError(t, err, \"find info/alternates\")\n\ttesthelper.AssertPathNotExists(t, altPath)\n\n\t_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})\n\trequire.NoError(t, err, \"call DisconnectGitAlternates on repository without alternates\")\n\n\ttesthelper.MustRunCommand(t, nil, \"git\", \"-C\", testRepoPath, \"fsck\")\n}",
"func TestDisconnectGitAlternatesUnexpectedAlternates(t *testing.T) {\n\tlocator := config.NewLocator(config.Config)\n\tserver, serverSocketPath := runObjectPoolServer(t, config.Config, locator)\n\tdefer server.Stop()\n\n\tclient, conn := newObjectPoolClient(t, serverSocketPath)\n\tdefer conn.Close()\n\n\tctx, cancel := testhelper.Context()\n\tdefer cancel()\n\n\ttestCases := []struct {\n\t\tdesc string\n\t\taltContent string\n\t}{\n\t\t{desc: \"multiple alternates\", altContent: \"/foo/bar\\n/qux/baz\\n\"},\n\t\t{desc: \"directory not found\", altContent: \"/does/not/exist/\\n\"},\n\t\t{desc: \"not a directory\", altContent: \"../HEAD\\n\"},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\ttestRepo, _, cleanupFn := testhelper.NewTestRepo(t)\n\t\t\tdefer cleanupFn()\n\n\t\t\taltPath, err := locator.InfoAlternatesPath(testRepo)\n\t\t\trequire.NoError(t, err, \"find info/alternates\")\n\n\t\t\trequire.NoError(t, ioutil.WriteFile(altPath, []byte(tc.altContent), 0644))\n\n\t\t\t_, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: testRepo})\n\t\t\trequire.Error(t, err, \"call DisconnectGitAlternates on repository with unexpected objects/info/alternates\")\n\n\t\t\tcontentAfterRPC, err := ioutil.ReadFile(altPath)\n\t\t\trequire.NoError(t, err, \"read back objects/info/alternates\")\n\t\t\trequire.Equal(t, tc.altContent, string(contentAfterRPC), \"objects/info/alternates content should not have changed\")\n\t\t})\n\t}\n}",
"func TestRemoveAlternatesIfOk(t *testing.T) {\n\tctx, cancel := testhelper.Context()\n\tdefer cancel()\n\n\ttestRepo, testRepoPath, cleanupFn := testhelper.NewTestRepo(t)\n\tdefer cleanupFn()\n\n\tlocator := config.NewLocator(config.Config)\n\taltPath, err := locator.InfoAlternatesPath(testRepo)\n\trequire.NoError(t, err, \"find info/alternates\")\n\taltContent := \"/var/empty\\n\"\n\trequire.NoError(t, ioutil.WriteFile(altPath, []byte(altContent), 0644), \"write alternates file\")\n\n\t// Intentionally break the repository, so that 'git fsck' will fail later.\n\ttesthelper.MustRunCommand(t, nil, \"sh\", \"-c\", fmt.Sprintf(\"rm %s/objects/pack/*.pack\", testRepoPath))\n\n\taltBackup := altPath + \".backup\"\n\n\terr = removeAlternatesIfOk(ctx, testRepo, altPath, altBackup)\n\trequire.Error(t, err, \"removeAlternatesIfOk should fail\")\n\trequire.IsType(t, &fsckError{}, err, \"error must be because of fsck\")\n\n\t// We expect objects/info/alternates to have been restored when\n\t// removeAlternatesIfOk returned.\n\tassertAlternates(t, altPath, altContent)\n\n\t// We expect the backup alternates file to still exist.\n\tassertAlternates(t, altBackup, altContent)\n}"
] |
f700417668b2ee0a871d2cd13966ac0cec01bd70 | 6,417 | go | Go | contrib/mongodb/mongo-go-driver/vendor/github.com/mongodb/mongo-go-driver/mongo/client_options_test.go | FlamingTree/dd-trace-go | 827caf89bbb4b955554157db7b0d7e59959a1408 | [
"BSD-3-Clause"
] | null | null | null | contrib/mongodb/mongo-go-driver/vendor/github.com/mongodb/mongo-go-driver/mongo/client_options_test.go | FlamingTree/dd-trace-go | 827caf89bbb4b955554157db7b0d7e59959a1408 | [
"BSD-3-Clause"
] | null | null | null | contrib/mongodb/mongo-go-driver/vendor/github.com/mongodb/mongo-go-driver/mongo/client_options_test.go | FlamingTree/dd-trace-go | 827caf89bbb4b955554157db7b0d7e59959a1408 | [
"BSD-3-Clause"
] | null | null | null | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongo
import (
"context"
"net"
"sync/atomic"
"testing"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/core/connstring"
"github.com/mongodb/mongo-go-driver/core/readconcern"
"github.com/mongodb/mongo-go-driver/core/readpref"
"github.com/mongodb/mongo-go-driver/core/tag"
"github.com/mongodb/mongo-go-driver/core/writeconcern"
"github.com/mongodb/mongo-go-driver/internal/testutil"
"github.com/mongodb/mongo-go-driver/mongo/clientopt"
"github.com/stretchr/testify/require"
)
// TestClientOptions_simple checks that a programmatic client option (AppName)
// is applied to the client's internal connection string.
func TestClientOptions_simple(t *testing.T) {
	t.Parallel()
	if testing.Short() {
		t.Skip()
	}
	connStr := testutil.ConnString(t)
	c, err := NewClientWithOptions(connStr.String(), clientopt.AppName("foo"))
	require.NoError(t, err)
	require.Equal(t, "foo", c.connString.AppName)
}
// TestClientOptions_deferToConnString checks option precedence: a value set in
// the URI ("appname=bar") wins over the programmatic option (AppName("foo")).
func TestClientOptions_deferToConnString(t *testing.T) {
	t.Parallel()
	if testing.Short() {
		t.Skip()
	}
	baseURI := testutil.ConnString(t).String()
	uri := testutil.AddOptionsToURI(baseURI, "appname=bar")
	c, err := NewClientWithOptions(uri, clientopt.AppName("foo"))
	require.NoError(t, err)
	require.Equal(t, "bar", c.connString.AppName)
}
// TestClientOptions_doesNotAlterConnectionString verifies that newClient works
// on its own copy of the connection string: options mutate the client's copy
// but never the caller's original value.
func TestClientOptions_doesNotAlterConnectionString(t *testing.T) {
	t.Parallel()
	original := connstring.ConnString{}
	c, err := newClient(original, clientopt.AppName("foobar"))
	require.NoError(t, err)
	if original.AppName != "" {
		t.Errorf("Creating a new Client should not alter the original connection string, but it did. got %s; want <empty>", original.AppName)
	}
	if c.connString.AppName != "foobar" {
		t.Errorf("Creating a new Client should alter the internal copy of the connection string, but it didn't. got %s; want %s", c.connString.AppName, "foobar")
	}
}
// TestClientOptions_chainAll builds a bundle that sets every supported client
// option via the fluent chain, unbundles it against an empty connection
// string, and compares the result field-by-field against a hand-written
// expected clientopt.Client. Any option that stops being propagated will show
// up as a diff in the final require.Equal.
func TestClientOptions_chainAll(t *testing.T) {
	t.Parallel()
	readPrefMode, err := readpref.ModeFromString("secondary")
	require.NoError(t, err)
	rp, err := readpref.New(
		readPrefMode,
		readpref.WithTagSets(tag.NewTagSetsFromMaps([]map[string]string{{"nyc": "1"}})...),
		readpref.WithMaxStaleness(2*time.Second),
	)
	rc := readconcern.New(readconcern.Level("majority"))
	wc := writeconcern.New(
		writeconcern.J(true),
		writeconcern.WTagSet("majority"),
		writeconcern.W(3),
		writeconcern.WTimeout(2*time.Second),
	)
	require.NoError(t, err)
	// Chain every available option on a single bundle.
	opts := clientopt.BundleClient().
		AppName("foo").
		Auth(clientopt.Credential{
			AuthMechanism:           "MONGODB-X509",
			AuthMechanismProperties: map[string]string{"foo": "bar"},
			AuthSource:              "$external",
			Password:                "supersecurepassword",
			Username:                "admin",
		}).
		ConnectTimeout(500 * time.Millisecond).
		HeartbeatInterval(15 * time.Second).
		Hosts([]string{
			"mongodb://localhost:27018",
			"mongodb://localhost:27019"}).
		LocalThreshold(time.Second).
		MaxConnIdleTime(30 * time.Second).
		MaxConnsPerHost(150).
		MaxIdleConnsPerHost(20).
		ReadConcern(rc).
		ReadPreference(rp).
		ReplicaSet("foo").
		RetryWrites(true).
		ServerSelectionTimeout(time.Second).
		Single(false).
		SocketTimeout(2 * time.Second).
		SSL(&clientopt.SSLOpt{
			Enabled:                      true,
			ClientCertificateKeyFile:     "client.pem",
			ClientCertificateKeyPassword: nil,
			Insecure:                     false,
			CaFile:                       "ca.pem",
		}).
		WriteConcern(wc)
	// Expected result: each chained option must appear in the connection
	// string copy (with its corresponding *Set flag true) or in the top-level
	// fields for read/write concern, read preference and retryable writes.
	expectedClient := &clientopt.Client{
		TopologyOptions: nil,
		ConnString: connstring.ConnString{
			AppName:                 "foo",
			AuthMechanism:           "MONGODB-X509",
			AuthMechanismProperties: map[string]string{"foo": "bar"},
			AuthSource:              "$external",
			Username:                "admin",
			Password:                "supersecurepassword",
			ConnectTimeout:          500 * time.Millisecond,
			ConnectTimeoutSet:       true,
			HeartbeatInterval:       15 * time.Second,
			HeartbeatIntervalSet:    true,
			Hosts: []string{
				"mongodb://localhost:27018",
				"mongodb://localhost:27019",
			},
			LocalThresholdSet:                  true,
			LocalThreshold:                     time.Second,
			MaxConnIdleTime:                    30 * time.Second,
			MaxConnIdleTimeSet:                 true,
			MaxConnsPerHost:                    150,
			MaxConnsPerHostSet:                 true,
			MaxIdleConnsPerHost:                20,
			MaxIdleConnsPerHostSet:             true,
			ReplicaSet:                         "foo",
			ServerSelectionTimeoutSet:          true,
			ServerSelectionTimeout:             time.Second,
			Connect:                            connstring.AutoConnect,
			ConnectSet:                         true,
			SocketTimeout:                      2 * time.Second,
			SocketTimeoutSet:                   true,
			SSL:                                true,
			SSLSet:                             true,
			SSLClientCertificateKeyFile:        "client.pem",
			SSLClientCertificateKeyFileSet:     true,
			SSLClientCertificateKeyPassword:    nil,
			SSLClientCertificateKeyPasswordSet: true,
			SSLInsecure:                        false,
			SSLInsecureSet:                     true,
			SSLCaFile:                          "ca.pem",
			SSLCaFileSet:                       true,
		},
		ReadConcern:    rc,
		ReadPreference: rp,
		WriteConcern:   wc,
		RetryWrites:    true,
		RetryWritesSet: true,
	}
	client, err := opts.Unbundle(connstring.ConnString{})
	require.NoError(t, err)
	require.NotNil(t, client)
	require.Equal(t, expectedClient, client)
}
// TestClientOptions_CustomDialer verifies that a user-supplied Dialer passed
// via clientopt.Dialer is actually used when the client opens connections.
func TestClientOptions_CustomDialer(t *testing.T) {
	dialer := &testDialer{d: &net.Dialer{}}
	client, err := newClient(testutil.ConnString(t), clientopt.Dialer(dialer))
	require.NoError(t, err)
	require.NoError(t, client.Connect(context.Background()))
	// Any round trip forces at least one dial through our wrapper.
	_, err = client.ListDatabases(context.Background(), bson.NewDocument())
	require.NoError(t, err)
	if calls := atomic.LoadInt32(&dialer.called); calls < 1 {
		t.Errorf("Custom dialer was not used when dialing new connections")
	}
}
// testDialer wraps another Dialer and counts how many times DialContext is
// invoked, so tests can assert that the custom dialer was actually used.
type testDialer struct {
	called int32 // number of DialContext invocations; accessed atomically
	d      Dialer // underlying dialer that performs the real connection
}
// DialContext records the call atomically and then delegates the actual
// connection establishment to the wrapped dialer.
func (td *testDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
	atomic.AddInt32(&td.called, 1)
	conn, err := td.d.DialContext(ctx, network, address)
	return conn, err
}
| 31.610837 | 160 | 0.647499 | [
"func TestClientOptions_simple(t *testing.T) {\n\tt.Parallel()\n\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\n\tcs := testutil.ConnString(t)\n\tclient, err := NewClientWithOptions(cs.String(), clientopt.AppName(\"foo\"))\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"foo\", client.connString.AppName)\n}",
"func TestClientOptions_deferToConnString(t *testing.T) {\n\tt.Parallel()\n\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\n\tcs := testutil.ConnString(t)\n\turi := testutil.AddOptionsToURI(cs.String(), \"appname=bar\")\n\n\tclient, err := NewClientWithOptions(uri, clientopt.AppName(\"foo\"))\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"bar\", client.connString.AppName)\n}",
"func TestClientOptions_doesNotAlterConnectionString(t *testing.T) {\n\tt.Parallel()\n\n\tcs := connstring.ConnString{}\n\tclient, err := newClient(cs, clientopt.AppName(\"foobar\"))\n\trequire.NoError(t, err)\n\tif cs.AppName != \"\" {\n\t\tt.Errorf(\"Creating a new Client should not alter the original connection string, but it did. got %s; want <empty>\", cs.AppName)\n\t}\n\tif client.connString.AppName != \"foobar\" {\n\t\tt.Errorf(\"Creating a new Client should alter the internal copy of the connection string, but it didn't. got %s; want %s\", client.connString.AppName, \"foobar\")\n\t}\n}",
"func TestClientOptions_chainAll(t *testing.T) {\n\tt.Parallel()\n\treadPrefMode, err := readpref.ModeFromString(\"secondary\")\n\trequire.NoError(t, err)\n\trp, err := readpref.New(\n\t\treadPrefMode,\n\t\treadpref.WithTagSets(tag.NewTagSetsFromMaps([]map[string]string{{\"nyc\": \"1\"}})...),\n\t\treadpref.WithMaxStaleness(2*time.Second),\n\t)\n\trc := readconcern.New(readconcern.Level(\"majority\"))\n\twc := writeconcern.New(\n\t\twriteconcern.J(true),\n\t\twriteconcern.WTagSet(\"majority\"),\n\t\twriteconcern.W(3),\n\t\twriteconcern.WTimeout(2*time.Second),\n\t)\n\trequire.NoError(t, err)\n\topts := clientopt.BundleClient().\n\t\tAppName(\"foo\").\n\t\tAuth(clientopt.Credential{\n\t\t\tAuthMechanism: \"MONGODB-X509\",\n\t\t\tAuthMechanismProperties: map[string]string{\"foo\": \"bar\"},\n\t\t\tAuthSource: \"$external\",\n\t\t\tPassword: \"supersecurepassword\",\n\t\t\tUsername: \"admin\",\n\t\t}).\n\t\tConnectTimeout(500 * time.Millisecond).\n\t\tHeartbeatInterval(15 * time.Second).\n\t\tHosts([]string{\n\t\t\t\"mongodb://localhost:27018\",\n\t\t\t\"mongodb://localhost:27019\"}).\n\t\tLocalThreshold(time.Second).\n\t\tMaxConnIdleTime(30 * time.Second).\n\t\tMaxConnsPerHost(150).\n\t\tMaxIdleConnsPerHost(20).\n\t\tReadConcern(rc).\n\t\tReadPreference(rp).\n\t\tReplicaSet(\"foo\").\n\t\tRetryWrites(true).\n\t\tServerSelectionTimeout(time.Second).\n\t\tSingle(false).\n\t\tSocketTimeout(2 * time.Second).\n\t\tSSL(&clientopt.SSLOpt{\n\t\t\tEnabled: true,\n\t\t\tClientCertificateKeyFile: \"client.pem\",\n\t\t\tClientCertificateKeyPassword: nil,\n\t\t\tInsecure: false,\n\t\t\tCaFile: \"ca.pem\",\n\t\t}).\n\t\tWriteConcern(wc)\n\n\texpectedClient := &clientopt.Client{\n\t\tTopologyOptions: nil,\n\t\tConnString: connstring.ConnString{\n\t\t\tAppName: \"foo\",\n\t\t\tAuthMechanism: \"MONGODB-X509\",\n\t\t\tAuthMechanismProperties: map[string]string{\"foo\": \"bar\"},\n\t\t\tAuthSource: \"$external\",\n\t\t\tUsername: \"admin\",\n\t\t\tPassword: 
\"supersecurepassword\",\n\t\t\tConnectTimeout: 500 * time.Millisecond,\n\t\t\tConnectTimeoutSet: true,\n\t\t\tHeartbeatInterval: 15 * time.Second,\n\t\t\tHeartbeatIntervalSet: true,\n\t\t\tHosts: []string{\n\t\t\t\t\"mongodb://localhost:27018\",\n\t\t\t\t\"mongodb://localhost:27019\",\n\t\t\t},\n\t\t\tLocalThresholdSet: true,\n\t\t\tLocalThreshold: time.Second,\n\t\t\tMaxConnIdleTime: 30 * time.Second,\n\t\t\tMaxConnIdleTimeSet: true,\n\t\t\tMaxConnsPerHost: 150,\n\t\t\tMaxConnsPerHostSet: true,\n\t\t\tMaxIdleConnsPerHost: 20,\n\t\t\tMaxIdleConnsPerHostSet: true,\n\t\t\tReplicaSet: \"foo\",\n\t\t\tServerSelectionTimeoutSet: true,\n\t\t\tServerSelectionTimeout: time.Second,\n\t\t\tConnect: connstring.AutoConnect,\n\t\t\tConnectSet: true,\n\t\t\tSocketTimeout: 2 * time.Second,\n\t\t\tSocketTimeoutSet: true,\n\t\t\tSSL: true,\n\t\t\tSSLSet: true,\n\t\t\tSSLClientCertificateKeyFile: \"client.pem\",\n\t\t\tSSLClientCertificateKeyFileSet: true,\n\t\t\tSSLClientCertificateKeyPassword: nil,\n\t\t\tSSLClientCertificateKeyPasswordSet: true,\n\t\t\tSSLInsecure: false,\n\t\t\tSSLInsecureSet: true,\n\t\t\tSSLCaFile: \"ca.pem\",\n\t\t\tSSLCaFileSet: true,\n\t\t},\n\t\tReadConcern: rc,\n\t\tReadPreference: rp,\n\t\tWriteConcern: wc,\n\t\tRetryWrites: true,\n\t\tRetryWritesSet: true,\n\t}\n\n\tclient, err := opts.Unbundle(connstring.ConnString{})\n\trequire.NoError(t, err)\n\trequire.NotNil(t, client)\n\trequire.Equal(t, expectedClient, client)\n}",
"func TestClientOptions_CustomDialer(t *testing.T) {\n\ttd := &testDialer{d: &net.Dialer{}}\n\topts := clientopt.Dialer(td)\n\tclient, err := newClient(testutil.ConnString(t), opts)\n\trequire.NoError(t, err)\n\terr = client.Connect(context.Background())\n\trequire.NoError(t, err)\n\t_, err = client.ListDatabases(context.Background(), bson.NewDocument())\n\trequire.NoError(t, err)\n\tgot := atomic.LoadInt32(&td.called)\n\tif got < 1 {\n\t\tt.Errorf(\"Custom dialer was not used when dialing new connections\")\n\t}\n}"
] |
f70046e4048897753a40a3d78933ed37ca50ffd8 | 684 | go | Go | backend/service/topology/topology_test.go | titansmc/clutch | 96be7bc7d1eeb5c06932690dfd8f9e04387b46be | [
"Apache-2.0"
] | null | null | null | backend/service/topology/topology_test.go | titansmc/clutch | 96be7bc7d1eeb5c06932690dfd8f9e04387b46be | [
"Apache-2.0"
] | 30 | 2020-09-16T19:53:30.000Z | 2021-11-02T15:08:38.000Z | backend/service/topology/topology_test.go | titansmc/clutch | 96be7bc7d1eeb5c06932690dfd8f9e04387b46be | [
"Apache-2.0"
] | null | null | null | package topology
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/uber-go/tally/v4"
"go.uber.org/zap/zaptest"
"google.golang.org/protobuf/types/known/anypb"
topologyv1 "github.com/lyft/clutch/backend/api/config/service/topology/v1"
)
// TestNew constructs the topology service with a valid (empty) config proto
// and expects an error because its postgres dependency is not registered.
func TestNew(t *testing.T) {
	anyCfg, _ := anypb.New(&topologyv1.Config{})
	logger := zaptest.NewLogger(t)
	metrics := tally.NewTestScope("", nil)
	// Should throw error looking for postgres
	_, err := New(anyCfg, logger, metrics)
	assert.Error(t, err)
}
func TestNewWithWrongConfig(t *testing.T) {
_, err := New(&anypb.Any{TypeUrl: "foobar"}, nil, nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "mismatched message")
}
| 22.8 | 75 | 0.70614 | [
"func TestNew(t *testing.T) {\n\tcfg, _ := anypb.New(&topologyv1.Config{})\n\n\tlog := zaptest.NewLogger(t)\n\tscope := tally.NewTestScope(\"\", nil)\n\n\t// Should throw error looking for postgres\n\t_, err := New(cfg, log, scope)\n\tassert.Error(t, err)\n}",
"func TestNewWithWrongConfig(t *testing.T) {\n\t_, err := New(&anypb.Any{TypeUrl: \"foobar\"}, nil, nil)\n\tassert.Error(t, err)\n\tassert.Contains(t, err.Error(), \"mismatched message\")\n}"
] |
f7004df6820a7c5527fbac06d58dae8cbf6b9cda | 20,444 | go | Go | x/ibc/core/04-channel/types/msgs_test.go | alessio/cosmos-sdk | c0d723314133f96bd118e29401d99187a90ce986 | [
"Apache-2.0"
] | null | null | null | x/ibc/core/04-channel/types/msgs_test.go | alessio/cosmos-sdk | c0d723314133f96bd118e29401d99187a90ce986 | [
"Apache-2.0"
] | null | null | null | x/ibc/core/04-channel/types/msgs_test.go | alessio/cosmos-sdk | c0d723314133f96bd118e29401d99187a90ce986 | [
"Apache-2.0"
] | null | null | null | package types_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
"github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)
const (
	// valid constants used for testing
	portid   = "testportid"
	chanid   = "testchannel"
	cpportid = "testcpport"
	cpchanid = "testcpchannel"
	version  = "1.0"

	// invalid constants used for testing; each one violates an identifier
	// constraint (disallowed characters, or too short/too long), as exercised
	// by the ValidateBasic table tests below
	invalidPort      = "(invalidport1)"
	invalidShortPort = "p"
	invalidLongPort  = "invalidlongportinvalidlongportinvalidlongportidinvalidlongportidinvalid"

	invalidChannel      = "(invalidchannel1)"
	invalidShortChannel = "invalidch"
	invalidLongChannel  = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel"

	invalidConnection      = "(invalidconnection1)"
	invalidShortConnection = "invalidcn"
	invalidLongConnection  = "invalidlongconnectioninvalidlongconnectioninvalidlongconnectioninvalid"
)
// define variables used for testing
var (
	height           = clienttypes.NewHeight(0, 1)
	timeoutHeight    = clienttypes.NewHeight(0, 100)
	timeoutTimestamp = uint64(100)
	disabledTimeout  = clienttypes.ZeroHeight()
	validPacketData  = []byte("testdata")
	unknownPacketData = []byte("unknown")

	// invalidPacket carries sequence 0, which ValidateBasic rejects
	packet        = types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp)
	invalidPacket = types.NewPacket(unknownPacketData, 0, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp)

	emptyProof     = []byte{}
	invalidProofs1 = exported.Proof(nil)
	invalidProofs2 = emptyProof

	addr      = sdk.AccAddress("testaddr111111111111")
	emptyAddr sdk.AccAddress

	// connection hop fixtures: valid single hop, and invalid variants
	// (more than one hop, or hops with bad identifiers)
	connHops             = []string{"testconnection"}
	invalidConnHops      = []string{"testconnection", "testconnection"}
	invalidShortConnHops = []string{invalidShortConnection}
	invalidLongConnHops  = []string{invalidLongConnection}
)
// TypesTestSuite groups the channel message tests; SetupTest populates proof
// with a serialized commitment proof used by messages that require one.
type TypesTestSuite struct {
	suite.Suite

	proof []byte // marshalled commitment proof produced in SetupTest
}
// SetupTest creates a minimal IAVL store backed by an in-memory DB, commits a
// single key/value pair, and issues a Query with Prove=true so that a genuine
// Merkle commitment proof is available to the message tests via suite.proof.
func (suite *TypesTestSuite) SetupTest() {
	app := simapp.Setup(false)
	db := dbm.NewMemDB()
	store := rootmulti.NewStore(db)
	storeKey := storetypes.NewKVStoreKey("iavlStoreKey")

	store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil)
	store.LoadVersion(0)
	iavlStore := store.GetCommitStore(storeKey).(*iavl.Store)

	// A commit is needed so the queried proof is anchored in a real app hash.
	iavlStore.Set([]byte("KEY"), []byte("VALUE"))
	_ = store.Commit()

	res := store.Query(abci.RequestQuery{
		Path:  fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof
		Data:  []byte("KEY"),
		Prove: true,
	})

	merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
	suite.Require().NoError(err)
	// Messages carry proofs in serialized form, so marshal before storing.
	proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof)
	suite.Require().NoError(err)

	suite.proof = proof
}
// TestTypesTestSuite runs the channel types message test suite.
func TestTypesTestSuite(t *testing.T) {
	suite.Run(t, &TypesTestSuite{})
}
// TestMsgChannelOpenInitValidateBasic table-tests MsgChannelOpenInit.ValidateBasic,
// covering port/channel/connection identifier rules, channel ordering, hop
// count, counterparty identifiers, and channel state. A case with an empty
// name is an expected-success case.
func (suite *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() {
	counterparty := types.NewCounterparty(cpportid, cpchanid)
	// A channel in TRYOPEN state is invalid for an OpenInit message, which
	// requires INIT; used by the final test case below.
	tryOpenChannel := types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version)

	testCases := []struct {
		name    string
		msg     *types.MsgChannelOpenInit
		expPass bool
	}{
		{"", types.NewMsgChannelOpenInit(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, addr), true},
		{"too short port id", types.NewMsgChannelOpenInit(invalidShortPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"too long port id", types.NewMsgChannelOpenInit(invalidLongPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelOpenInit(invalidPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"too short channel id", types.NewMsgChannelOpenInit(portid, invalidShortChannel, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"too long channel id", types.NewMsgChannelOpenInit(portid, invalidLongChannel, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelOpenInit(portid, invalidChannel, version, types.ORDERED, connHops, cpportid, cpchanid, addr), false},
		{"invalid channel order", types.NewMsgChannelOpenInit(portid, chanid, version, types.Order(3), connHops, cpportid, cpchanid, addr), false},
		{"connection hops more than 1 ", types.NewMsgChannelOpenInit(portid, chanid, version, types.ORDERED, invalidConnHops, cpportid, cpchanid, addr), false},
		{"too short connection id", types.NewMsgChannelOpenInit(portid, chanid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, addr), false},
		{"too long connection id", types.NewMsgChannelOpenInit(portid, chanid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, addr), false},
		{"connection id contains non-alpha", types.NewMsgChannelOpenInit(portid, chanid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, addr), false},
		// an empty version string is allowed
		{"", types.NewMsgChannelOpenInit(portid, chanid, "", types.UNORDERED, connHops, cpportid, cpchanid, addr), true},
		{"invalid counterparty port id", types.NewMsgChannelOpenInit(portid, chanid, version, types.UNORDERED, connHops, invalidPort, cpchanid, addr), false},
		{"invalid counterparty channel id", types.NewMsgChannelOpenInit(portid, chanid, version, types.UNORDERED, connHops, cpportid, invalidChannel, addr), false},
		{"channel not in INIT state", &types.MsgChannelOpenInit{portid, chanid, tryOpenChannel, addr.String()}, false},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure

		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgChannelOpenTryValidateBasic table-tests MsgChannelOpenTry.ValidateBasic,
// covering identifier rules, proof/proof-height requirements, the optional
// proved channel id, and channel state. A case with an empty name is an
// expected-success case.
func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() {
	counterparty := types.NewCounterparty(cpportid, cpchanid)
	// A channel in INIT state is invalid for an OpenTry message, which
	// requires TRYOPEN; used by the final test case below.
	initChannel := types.NewChannel(types.INIT, types.ORDERED, counterparty, connHops, version)

	testCases := []struct {
		name    string
		msg     *types.MsgChannelOpenTry
		expPass bool
	}{
		{"", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true},
		{"too short port id", types.NewMsgChannelOpenTry(invalidShortPort, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"too long port id", types.NewMsgChannelOpenTry(invalidLongPort, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelOpenTry(invalidPort, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"too short channel id", types.NewMsgChannelOpenTry(portid, invalidShortChannel, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"too long channel id", types.NewMsgChannelOpenTry(portid, invalidLongChannel, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelOpenTry(portid, invalidChannel, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		// an empty counterparty version is allowed
		{"", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, "", suite.proof, height, addr), true},
		{"proof height is zero", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"invalid channel order", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.Order(4), connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"connection hops more than 1 ", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, invalidConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"too short connection id", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"too long connection id", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"connection id contains non-alpha", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, version, suite.proof, height, addr), false},
		// an empty version string is allowed
		{"", types.NewMsgChannelOpenTry(portid, chanid, chanid, "", types.UNORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true},
		{"invalid counterparty port id", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, suite.proof, height, addr), false},
		{"invalid counterparty channel id", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, suite.proof, height, addr), false},
		{"empty proof", types.NewMsgChannelOpenTry(portid, chanid, chanid, version, types.UNORDERED, connHops, cpportid, cpchanid, version, emptyProof, height, addr), false},
		{"valid empty proved channel id", types.NewMsgChannelOpenTry(portid, chanid, "", version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true},
		{"invalid proved channel id, doesn't match channel id", types.NewMsgChannelOpenTry(portid, chanid, "differentchannel", version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
		{"channel not in TRYOPEN state", &types.MsgChannelOpenTry{portid, chanid, chanid, initChannel, version, suite.proof, height, addr.String()}, false},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure

		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgChannelOpenAckValidateBasic exercises MsgChannelOpenAck.ValidateBasic
// over valid and invalid port IDs, channel IDs, counterparty version, proof,
// and proof height.
func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgChannelOpenAck
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, height, addr), true},
		{"too short port id", types.NewMsgChannelOpenAck(invalidShortPort, chanid, chanid, version, suite.proof, height, addr), false},
		{"too long port id", types.NewMsgChannelOpenAck(invalidLongPort, chanid, chanid, version, suite.proof, height, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelOpenAck(invalidPort, chanid, chanid, version, suite.proof, height, addr), false},
		{"too short channel id", types.NewMsgChannelOpenAck(portid, invalidShortChannel, chanid, version, suite.proof, height, addr), false},
		{"too long channel id", types.NewMsgChannelOpenAck(portid, invalidLongChannel, chanid, version, suite.proof, height, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelOpenAck(portid, invalidChannel, chanid, version, suite.proof, height, addr), false},
		{"", types.NewMsgChannelOpenAck(portid, chanid, chanid, "", suite.proof, height, addr), true},
		{"empty proof", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, emptyProof, height, addr), false},
		{"proof height is zero", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"invalid counterparty channel id", types.NewMsgChannelOpenAck(portid, chanid, invalidShortChannel, version, suite.proof, height, addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgChannelOpenConfirmValidateBasic exercises
// MsgChannelOpenConfirm.ValidateBasic over valid and invalid port IDs,
// channel IDs, proof, and proof height.
func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgChannelOpenConfirm
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, height, addr), true},
		{"too short port id", types.NewMsgChannelOpenConfirm(invalidShortPort, chanid, suite.proof, height, addr), false},
		{"too long port id", types.NewMsgChannelOpenConfirm(invalidLongPort, chanid, suite.proof, height, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelOpenConfirm(invalidPort, chanid, suite.proof, height, addr), false},
		{"too short channel id", types.NewMsgChannelOpenConfirm(portid, invalidShortChannel, suite.proof, height, addr), false},
		{"too long channel id", types.NewMsgChannelOpenConfirm(portid, invalidLongChannel, suite.proof, height, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelOpenConfirm(portid, invalidChannel, suite.proof, height, addr), false},
		{"empty proof", types.NewMsgChannelOpenConfirm(portid, chanid, emptyProof, height, addr), false},
		{"proof height is zero", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgChannelCloseInitValidateBasic exercises
// MsgChannelCloseInit.ValidateBasic over valid and invalid port and
// channel identifiers.
func (suite *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgChannelCloseInit
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgChannelCloseInit(portid, chanid, addr), true},
		{"too short port id", types.NewMsgChannelCloseInit(invalidShortPort, chanid, addr), false},
		{"too long port id", types.NewMsgChannelCloseInit(invalidLongPort, chanid, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelCloseInit(invalidPort, chanid, addr), false},
		{"too short channel id", types.NewMsgChannelCloseInit(portid, invalidShortChannel, addr), false},
		{"too long channel id", types.NewMsgChannelCloseInit(portid, invalidLongChannel, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelCloseInit(portid, invalidChannel, addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgChannelCloseConfirmValidateBasic exercises
// MsgChannelCloseConfirm.ValidateBasic over valid and invalid port IDs,
// channel IDs, proof, and proof height.
func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgChannelCloseConfirm
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr), true},
		{"too short port id", types.NewMsgChannelCloseConfirm(invalidShortPort, chanid, suite.proof, height, addr), false},
		{"too long port id", types.NewMsgChannelCloseConfirm(invalidLongPort, chanid, suite.proof, height, addr), false},
		{"port id contains non-alpha", types.NewMsgChannelCloseConfirm(invalidPort, chanid, suite.proof, height, addr), false},
		{"too short channel id", types.NewMsgChannelCloseConfirm(portid, invalidShortChannel, suite.proof, height, addr), false},
		{"too long channel id", types.NewMsgChannelCloseConfirm(portid, invalidLongChannel, suite.proof, height, addr), false},
		{"channel id contains non-alpha", types.NewMsgChannelCloseConfirm(portid, invalidChannel, suite.proof, height, addr), false},
		{"empty proof", types.NewMsgChannelCloseConfirm(portid, chanid, emptyProof, height, addr), false},
		{"proof height is zero", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgRecvPacketType verifies that a MsgRecvPacket reports the expected
// routing type string.
func (suite *TypesTestSuite) TestMsgRecvPacketType() {
	const wantType = "recv_packet"
	recvMsg := types.NewMsgRecvPacket(packet, suite.proof, height, addr)
	suite.Equal(wantType, recvMsg.Type())
}
// TestMsgRecvPacketValidateBasic exercises MsgRecvPacket.ValidateBasic with
// valid and invalid proofs, proof heights, signers, and packets.
func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgRecvPacket
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgRecvPacket(packet, suite.proof, height, addr), true},
		{"proof height is zero", types.NewMsgRecvPacket(packet, suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"proof contain empty proof", types.NewMsgRecvPacket(packet, emptyProof, height, addr), false},
		{"missing signer address", types.NewMsgRecvPacket(packet, suite.proof, height, emptyAddr), false},
		{"invalid packet", types.NewMsgRecvPacket(invalidPacket, suite.proof, height, addr), false},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				// Use Require (as every sibling ValidateBasic test does) so a
				// failed assertion aborts the subtest immediately.
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgRecvPacketGetSigners checks that GetSigners returns exactly the
// message sender, compared via its formatted representation.
func (suite *TypesTestSuite) TestMsgRecvPacketGetSigners() {
	const wantSigners = "[7465737461646472313131313131313131313131]"
	signers := types.NewMsgRecvPacket(packet, suite.proof, height, addr).GetSigners()
	suite.Equal(wantSigners, fmt.Sprintf("%v", signers))
}
// TestMsgTimeoutValidateBasic exercises MsgTimeout.ValidateBasic with valid
// and invalid proofs, proof heights, signers, and packets.
func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgTimeout
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgTimeout(packet, 1, suite.proof, height, addr), true},
		{"proof height must be > 0", types.NewMsgTimeout(packet, 1, suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"missing signer address", types.NewMsgTimeout(packet, 1, suite.proof, height, emptyAddr), false},
		{"cannot submit an empty proof", types.NewMsgTimeout(packet, 1, emptyProof, height, addr), false},
		{"invalid packet", types.NewMsgTimeout(invalidPacket, 1, suite.proof, height, addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgTimeoutOnCloseValidateBasic exercises MsgTimeoutOnClose.ValidateBasic
// with valid and invalid proofs (both the receipt proof and the close proof),
// proof heights, signers, and packets.
func (suite *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() {
	testCases := []struct {
		name    string
		msg     sdk.Msg
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"success", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr), true},
		{"empty proof", types.NewMsgTimeoutOnClose(packet, 1, emptyProof, suite.proof, height, addr), false},
		{"empty proof close", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, emptyProof, height, addr), false},
		{"proof height is zero", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"signer address is empty", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, emptyAddr), false},
		{"invalid packet", types.NewMsgTimeoutOnClose(invalidPacket, 1, suite.proof, suite.proof, height, addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
// TestMsgAcknowledgementValidateBasic exercises
// MsgAcknowledgement.ValidateBasic with valid and invalid proofs, proof
// heights, signers, and packets.
func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() {
	testCases := []struct {
		name    string
		msg     *types.MsgAcknowledgement
		expPass bool // true when ValidateBasic is expected to succeed
	}{
		{"", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, addr), true},
		{"proof height must be > 0", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, clienttypes.ZeroHeight(), addr), false},
		{"missing signer address", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, emptyAddr), false},
		{"cannot submit an empty proof", types.NewMsgAcknowledgement(packet, packet.GetData(), emptyProof, height, addr), false},
		{"invalid packet", types.NewMsgAcknowledgement(invalidPacket, packet.GetData(), suite.proof, height, addr), false},
	}
	// Run each case as its own named subtest.
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		suite.Run(tc.name, func() {
			err := tc.msg.ValidateBasic()
			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}
| 45.431111 | 219 | 0.733222 | [
"func TestTypesTestSuite(t *testing.T) {\n\tsuite.Run(t, new(TypesTestSuite))\n}"
] |
f7005398f3d6c86068ea5a7980d16fae06749079 | 523 | go | Go | pkg/event/call/write_test.go | andrewalexander/swoll | 4658a5b2ebfce04ae053180c39aa5ebc8a8d59ab | [
"Apache-2.0"
] | 71 | 2020-11-13T18:46:11.000Z | 2022-01-21T07:42:06.000Z | pkg/event/call/write_test.go | andrewalexander/swoll | 4658a5b2ebfce04ae053180c39aa5ebc8a8d59ab | [
"Apache-2.0"
] | 2 | 2021-02-03T21:51:20.000Z | 2021-02-06T04:57:40.000Z | pkg/event/call/write_test.go | andrewalexander/swoll | 4658a5b2ebfce04ae053180c39aa5ebc8a8d59ab | [
"Apache-2.0"
] | 8 | 2020-11-13T18:52:15.000Z | 2021-11-03T22:55:33.000Z | package call
import (
"reflect"
"testing"
"encoding/json"
"github.com/criticalstack/swoll/pkg/types"
)
// TestWrite round-trips a Write event through JSON and verifies that the
// decoded copy reports the same syscall arguments as the original.
func TestWrite(t *testing.T) {
	original := &Write{
		FD:    types.InputFD(1),
		Buf:   types.Buffer([]byte("write test")),
		Count: 12,
	}

	encoded, err := json.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	decoded := Write{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(original.Arguments(), decoded.Arguments()) {
		t.Errorf("Was expecting %v, but got %v\n", original.Arguments(), decoded.Arguments())
	}
}
| 14.942857 | 74 | 0.608031 | [
"func TestWrite(t *testing.T) {\n\n\ts := &Write{\n\t\tFD: types.InputFD(1),\n\t\tBuf: types.Buffer([]byte(\"write test\")),\n\t\tCount: 12,\n\t}\n\n\tj, err := json.Marshal(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar w Write\n\tif err = json.Unmarshal(j, &w); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(s.Arguments(), w.Arguments()) {\n\t\tt.Errorf(\"Was expecting %v, but got %v\\n\", s.Arguments(), w.Arguments())\n\t}\n\n}"
] |
f70060988c8c1a55e767054b32bd44ef28bb1317 | 2,587 | go | Go | pkg/policy/common_test.go | phantomnat/kubevela | c8264b8c3419f90d90a0f8f4470e0289a7d075df | [
"Apache-2.0"
] | 27 | 2020-08-06T02:39:48.000Z | 2020-09-08T12:56:43.000Z | pkg/policy/common_test.go | phantomnat/kubevela | c8264b8c3419f90d90a0f8f4470e0289a7d075df | [
"Apache-2.0"
] | 143 | 2020-07-31T04:23:16.000Z | 2020-09-10T07:15:20.000Z | pkg/policy/common_test.go | phantomnat/kubevela | c8264b8c3419f90d90a0f8f4470e0289a7d075df | [
"Apache-2.0"
] | 28 | 2020-07-03T06:13:56.000Z | 2020-09-09T09:53:48.000Z | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
// TestParseGarbageCollectPolicy covers three scenarios for
// ParseGarbageCollectPolicy: an application without the policy (nil spec, no
// error), a policy with malformed JSON properties (error), and a well-formed
// policy whose parsed spec must round-trip intact.
func TestParseGarbageCollectPolicy(t *testing.T) {
	r := require.New(t)
	// No "garbage-collect" policy present: expect a nil spec and no error.
	app := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
		Policies: []v1beta1.AppPolicy{{Type: "example"}},
	}}
	spec, err := ParseGarbageCollectPolicy(app)
	r.NoError(err)
	r.Nil(spec)
	// Malformed policy properties: parsing must fail.
	app.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{
		Type:       "garbage-collect",
		Properties: &runtime.RawExtension{Raw: []byte("bad value")},
	})
	_, err = ParseGarbageCollectPolicy(app)
	r.Error(err)
	// Valid policy properties: the parsed spec must equal what was encoded.
	policySpec := &v1alpha1.GarbageCollectPolicySpec{
		KeepLegacyResource: false,
		Rules: []v1alpha1.GarbageCollectPolicyRule{{
			Selector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{"a"}},
			Strategy: v1alpha1.GarbageCollectStrategyOnAppUpdate,
		}, {
			Selector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{"b"}},
			Strategy: v1alpha1.GarbageCollectStrategyNever,
		}},
	}
	bs, err := json.Marshal(policySpec)
	r.NoError(err)
	app.Spec.Policies[1].Properties.Raw = bs
	spec, err = ParseGarbageCollectPolicy(app)
	r.NoError(err)
	r.Equal(policySpec, spec)
}
// TestParseApplyOncePolicy covers three scenarios for ParseApplyOncePolicy:
// an application without the policy (nil spec, no error), a policy with
// malformed JSON properties (error), and a well-formed policy (parsed spec).
func TestParseApplyOncePolicy(t *testing.T) {
	req := require.New(t)

	// No "apply-once" policy present: expect a nil spec and no error.
	testApp := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
		Policies: []v1beta1.AppPolicy{{Type: "example"}},
	}}
	parsed, err := ParseApplyOncePolicy(testApp)
	req.NoError(err)
	req.Nil(parsed)

	// Malformed policy properties: parsing must fail.
	testApp.Spec.Policies = append(testApp.Spec.Policies, v1beta1.AppPolicy{
		Type:       "apply-once",
		Properties: &runtime.RawExtension{Raw: []byte("bad value")},
	})
	_, err = ParseApplyOncePolicy(testApp)
	req.Error(err)

	// Valid policy properties: the parsed spec must equal what was encoded.
	want := &v1alpha1.ApplyOncePolicySpec{Enable: true}
	raw, err := json.Marshal(want)
	req.NoError(err)
	testApp.Spec.Policies[1].Properties.Raw = raw
	parsed, err = ParseApplyOncePolicy(testApp)
	req.NoError(err)
	req.Equal(want, parsed)
}
| 30.797619 | 76 | 0.743719 | [
"func TestParseGarbageCollectPolicy(t *testing.T) {\n\tr := require.New(t)\n\tapp := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{\n\t\tPolicies: []v1beta1.AppPolicy{{Type: \"example\"}},\n\t}}\n\tspec, err := ParseGarbageCollectPolicy(app)\n\tr.NoError(err)\n\tr.Nil(spec)\n\tapp.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{\n\t\tType: \"garbage-collect\",\n\t\tProperties: &runtime.RawExtension{Raw: []byte(\"bad value\")},\n\t})\n\t_, err = ParseGarbageCollectPolicy(app)\n\tr.Error(err)\n\tpolicySpec := &v1alpha1.GarbageCollectPolicySpec{\n\t\tKeepLegacyResource: false,\n\t\tRules: []v1alpha1.GarbageCollectPolicyRule{{\n\t\t\tSelector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{\"a\"}},\n\t\t\tStrategy: v1alpha1.GarbageCollectStrategyOnAppUpdate,\n\t\t}, {\n\t\t\tSelector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{\"b\"}},\n\t\t\tStrategy: v1alpha1.GarbageCollectStrategyNever,\n\t\t}},\n\t}\n\tbs, err := json.Marshal(policySpec)\n\tr.NoError(err)\n\tapp.Spec.Policies[1].Properties.Raw = bs\n\tspec, err = ParseGarbageCollectPolicy(app)\n\tr.NoError(err)\n\tr.Equal(policySpec, spec)\n}",
"func TestParseApplyOncePolicy(t *testing.T) {\n\tr := require.New(t)\n\tapp := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{\n\t\tPolicies: []v1beta1.AppPolicy{{Type: \"example\"}},\n\t}}\n\tspec, err := ParseApplyOncePolicy(app)\n\tr.NoError(err)\n\tr.Nil(spec)\n\tapp.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{\n\t\tType: \"apply-once\",\n\t\tProperties: &runtime.RawExtension{Raw: []byte(\"bad value\")},\n\t})\n\t_, err = ParseApplyOncePolicy(app)\n\tr.Error(err)\n\tpolicySpec := &v1alpha1.ApplyOncePolicySpec{Enable: true}\n\tbs, err := json.Marshal(policySpec)\n\tr.NoError(err)\n\tapp.Spec.Policies[1].Properties.Raw = bs\n\tspec, err = ParseApplyOncePolicy(app)\n\tr.NoError(err)\n\tr.Equal(policySpec, spec)\n}"
] |
f70065f28227c87a91b986c30479a44af840f5dc | 26,070 | go | Go | playground/backend/internal/code_processing/code_processing_test.go | viclai/beam | 872455570ae7f3e2e35360bccf93b503ae9fdb5c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | playground/backend/internal/code_processing/code_processing_test.go | viclai/beam | 872455570ae7f3e2e35360bccf93b503ae9fdb5c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2021-08-10T21:44:38.000Z | 2022-03-16T18:03:33.000Z | playground/backend/internal/code_processing/code_processing_test.go | viclai/beam | 872455570ae7f3e2e35360bccf93b503ae9fdb5c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-09-29T00:15:27.000Z | 2022-02-22T19:13:12.000Z | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package code_processing
import (
pb "beam.apache.org/playground/backend/internal/api/v1"
"beam.apache.org/playground/backend/internal/cache"
"beam.apache.org/playground/backend/internal/cache/local"
"beam.apache.org/playground/backend/internal/environment"
"beam.apache.org/playground/backend/internal/executors"
"beam.apache.org/playground/backend/internal/fs_tool"
"beam.apache.org/playground/backend/internal/utils"
"beam.apache.org/playground/backend/internal/validators"
"context"
"fmt"
"github.com/google/uuid"
"go.uber.org/goleak"
"io/fs"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"sync"
"testing"
"time"
)
// Raw JSON run configs for the Java, Python, and Go SDKs; setup() writes the
// Java one under configFolder so environment.ConfigureBeamEnvs can load it.
const (
	javaConfig = "{\n \"compile_cmd\": \"javac\",\n \"run_cmd\": \"java\",\n \"test_cmd\": \"java\",\n \"compile_args\": [\n \"-d\",\n \"bin\",\n \"-classpath\"\n ],\n \"run_args\": [\n \"-cp\",\n \"bin:\"\n ],\n \"test_args\": [\n \"-cp\",\n \"bin:\",\n \"JUnit\"\n ]\n}"
	pythonConfig = "{\n \"compile_cmd\": \"\",\n \"run_cmd\": \"python3\",\n \"compile_args\": [],\n \"run_args\": []\n}"
	goConfig = "{\n \"compile_cmd\": \"go\",\n \"run_cmd\": \"\",\n \"compile_args\": [\n \"build\",\n \"-o\",\n \"bin\"\n ],\n \"run_args\": [\n ]\n}"
	fileName = "fakeFileName" // name returned by the fakeExecutableName stub
	pipelinesFolder = "executable_files" // root folder for per-pipeline files
	configFolder = "configs" // folder holding the per-SDK JSON configs above
)

// opt records the goroutines alive before the suite runs (set in TestMain)
// so goleak.VerifyNone can ignore them.
var opt goleak.Option

// cacheService is the shared local cache used by every test; created in setup().
var cacheService cache.Cache
// TestMain prepares the test environment, snapshots the currently running
// goroutines for leak detection, runs the suite, and cleans up before
// exiting with the suite's status code.
func TestMain(m *testing.M) {
	setup()
	// Ignore goroutines already alive at this point in later leak checks.
	opt = goleak.IgnoreCurrent()
	code := m.Run()
	teardown()
	os.Exit(code)
}
func setup() {
// create configs for java
err := os.MkdirAll("configs", fs.ModePerm)
if err != nil {
panic(err)
}
filePath := filepath.Join("configs", pb.Sdk_SDK_JAVA.String()+".json")
err = os.WriteFile(filePath, []byte(javaConfig), 0600)
if err != nil {
panic(err)
}
path, err := os.Getwd()
if err != nil {
panic(err)
}
os.Setenv("BEAM_SDK", pb.Sdk_SDK_JAVA.String())
os.Setenv("APP_WORK_DIR", path)
cacheService = local.New(context.Background())
}
// teardown removes the folders created while the tests ran and clears the
// environment variables set by setup; it panics if cleanup fails.
func teardown() {
	for _, dir := range []string{configFolder, pipelinesFolder} {
		if err := os.RemoveAll(dir); err != nil {
			panic(fmt.Errorf("error during test teardown: %s", err.Error()))
		}
	}
	os.Clearenv()
}
// fakeExecutableName is a stub for the life cycle's executable-name resolver:
// it ignores the pipeline id and folder and always returns the fake file name.
func fakeExecutableName(_ uuid.UUID, _ string) (string, error) {
	return fileName, nil
}
// Test_Process drives the full code-processing pipeline (validate -> compile
// -> run) end to end against the Java SDK and asserts the status, compile
// output, run output, and run error that Process leaves in the cache for
// each scenario: timeout, validation failure, compile failure, runtime
// failure, cancellation, and success.
func Test_Process(t *testing.T) {
	defer goleak.VerifyNone(t, opt)
	appEnvs, err := environment.GetApplicationEnvsFromOsEnvs()
	if err != nil {
		panic(err)
	}
	sdkEnv, err := environment.ConfigureBeamEnvs(appEnvs.WorkingDir())
	if err != nil {
		panic(err)
	}
	// args bundles the parameters forwarded to Process for one test case.
	type args struct {
		ctx context.Context
		appEnv *environment.ApplicationEnvs
		sdkEnv *environment.BeamEnvs
		pipelineId uuid.UUID
		pipelineOptions string
	}
	tests := []struct {
		name string
		createExecFile bool // whether to write `code` to the pipeline's source file
		code string
		cancelFunc bool // whether to flip the cancel flag mid-run
		expectedStatus pb.Status
		expectedRunOutput interface{}
		expectedRunError interface{}
		expectedCompileOutput interface{}
		args args
	}{
		{
			// Test case with calling processCode method with small timeout.
			// As a result status into cache should be set as Status_STATUS_RUN_TIMEOUT.
			name: "small pipeline execution timeout",
			createExecFile: false,
			code: "",
			cancelFunc: false,
			expectedStatus: pb.Status_STATUS_RUN_TIMEOUT,
			expectedCompileOutput: nil,
			expectedRunOutput: nil,
			expectedRunError: nil,
			args: args{
				ctx: context.Background(),
				appEnv: &environment.ApplicationEnvs{},
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
		{
			// Test case with calling processCode method without preparing files with code.
			// As a result status into cache should be set as Status_STATUS_VALIDATION_ERROR.
			name: "validation failed",
			createExecFile: false,
			code: "",
			cancelFunc: false,
			expectedStatus: pb.Status_STATUS_VALIDATION_ERROR,
			expectedCompileOutput: nil,
			expectedRunOutput: nil,
			expectedRunError: nil,
			args: args{
				ctx: context.Background(),
				appEnv: appEnvs,
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
		{
			// Test case with calling processCode method with incorrect code.
			// As a result status into cache should be set as Status_STATUS_COMPILE_ERROR.
			name: "compilation failed",
			createExecFile: true,
			code: "MOCK_CODE",
			cancelFunc: false,
			expectedStatus: pb.Status_STATUS_COMPILE_ERROR,
			expectedCompileOutput: "error: exit status 1\noutput: %s:1: error: reached end of file while parsing\nMOCK_CODE\n^\n1 error\n",
			expectedRunOutput: nil,
			expectedRunError: nil,
			args: args{
				ctx: context.Background(),
				appEnv: appEnvs,
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
		{
			// Test case with calling processCode method with incorrect logic into code.
			// As a result status into cache should be set as Status_STATUS_RUN_ERROR.
			name: "run failed",
			createExecFile: true,
			code: "class HelloWorld {\n public static void main(String[] args) {\n System.out.println(1/0);\n }\n}",
			cancelFunc: false,
			expectedStatus: pb.Status_STATUS_RUN_ERROR,
			expectedCompileOutput: "",
			expectedRunOutput: "",
			expectedRunError: "error: exit status 1\noutput: Exception in thread \"main\" java.lang.ArithmeticException: / by zero\n\tat HelloWorld.main(%s.java:3)\n",
			args: args{
				ctx: context.Background(),
				appEnv: appEnvs,
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
		{
			// Test case with calling processCode with canceling code processing.
			// As a result status into cache should be set as Status_STATUS_CANCELED.
			name: "cancel",
			createExecFile: true,
			code: "class HelloWorld {\n public static void main(String[] args) {\n while(true){}\n }\n}",
			cancelFunc: true,
			expectedStatus: pb.Status_STATUS_CANCELED,
			expectedCompileOutput: "",
			expectedRunOutput: "",
			args: args{
				ctx: context.Background(),
				appEnv: appEnvs,
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
		{
			// Test case with calling processCode without any error cases.
			// As a result status into cache should be set as Status_STATUS_FINISHED.
			name: "processing complete successfully",
			createExecFile: true,
			cancelFunc: false,
			code: "class HelloWorld {\n public static void main(String[] args) {\n System.out.println(\"Hello world!\");\n }\n}",
			expectedStatus: pb.Status_STATUS_FINISHED,
			expectedCompileOutput: "",
			expectedRunOutput: "Hello world!\n",
			expectedRunError: nil,
			args: args{
				ctx: context.Background(),
				appEnv: appEnvs,
				sdkEnv: sdkEnv,
				pipelineId: uuid.New(),
				pipelineOptions: "",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Prepare a fresh life cycle (folders + optional source file) per case.
			lc, _ := fs_tool.NewLifeCycle(pb.Sdk_SDK_JAVA, tt.args.pipelineId, filepath.Join(os.Getenv("APP_WORK_DIR"), pipelinesFolder))
			err := lc.CreateFolders()
			if err != nil {
				t.Fatalf("error during prepare folders: %s", err.Error())
			}
			if tt.createExecFile {
				_ = lc.CreateSourceCodeFile(tt.code)
			}
			// Process expects the cancel flag to exist in the cache before it starts.
			if err = utils.SetToCache(tt.args.ctx, cacheService, tt.args.pipelineId, cache.Canceled, false); err != nil {
				t.Fatal("error during set cancel flag to cache")
			}
			if tt.cancelFunc {
				go func(ctx context.Context, pipelineId uuid.UUID) {
					// to imitate behavior of cancellation
					time.Sleep(5 * time.Second)
					cacheService.SetValue(ctx, pipelineId, cache.Canceled, true)
				}(tt.args.ctx, tt.args.pipelineId)
			}
			Process(tt.args.ctx, cacheService, lc, tt.args.pipelineId, tt.args.appEnv, tt.args.sdkEnv, tt.args.pipelineOptions)

			// Verify everything Process persisted into the cache.
			status, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.Status)
			if !reflect.DeepEqual(status, tt.expectedStatus) {
				t.Errorf("processCode() set status: %s, but expectes: %s", status, tt.expectedStatus)
			}
			compileOutput, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.CompileOutput)
			// Expected outputs may contain a %s placeholder for the source file path.
			if tt.expectedCompileOutput != nil && strings.Contains(tt.expectedCompileOutput.(string), "%s") {
				tt.expectedCompileOutput = fmt.Sprintf(tt.expectedCompileOutput.(string), lc.Paths.AbsoluteSourceFilePath)
			}
			if !reflect.DeepEqual(compileOutput, tt.expectedCompileOutput) {
				t.Errorf("processCode() set compileOutput: %s, but expectes: %s", compileOutput, tt.expectedCompileOutput)
			}
			runOutput, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.RunOutput)
			if !reflect.DeepEqual(runOutput, tt.expectedRunOutput) {
				t.Errorf("processCode() set runOutput: %s, but expectes: %s", runOutput, tt.expectedRunOutput)
			}
			runError, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.RunError)
			// Expected run errors may contain a %s placeholder for the pipeline id.
			if tt.expectedRunError != nil && strings.Contains(tt.expectedRunError.(string), "%s") {
				tt.expectedRunError = fmt.Sprintf(tt.expectedRunError.(string), tt.args.pipelineId)
			}
			if !reflect.DeepEqual(runError, tt.expectedRunError) {
				t.Errorf("processCode() set runError: %s, but expectes: %s", runError, tt.expectedRunError)
			}
		})
	}
}
// TestGetProcessingOutput checks GetProcessingOutput against three cache
// states: a missing pipeline id, a value of the wrong type (not convertible
// to string), and a correctly stored run output.
func TestGetProcessingOutput(t *testing.T) {
	defer goleak.VerifyNone(t, opt)
	pipelineId := uuid.New()
	incorrectConvertPipelineId := uuid.New()
	// Seed a valid string run output and a non-string value that should fail
	// conversion when read back.
	err := cacheService.SetValue(context.Background(), pipelineId, cache.RunOutput, "MOCK_RUN_OUTPUT")
	if err != nil {
		panic(err)
	}
	err = cacheService.SetValue(context.Background(), incorrectConvertPipelineId, cache.RunOutput, cache.RunOutput)
	if err != nil {
		panic(err)
	}
	type args struct {
		ctx context.Context
		cacheService cache.Cache
		key uuid.UUID
		subKey cache.SubKey
		errorTitle string
	}
	tests := []struct {
		name string
		args args
		want string
		wantErr bool
	}{
		{
			// Test case with calling GetProcessingOutput with pipelineId which doesn't contain run output.
			// As a result, want to receive an error.
			name: "get run output with incorrect pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: uuid.New(),
				subKey: cache.RunOutput,
				errorTitle: "",
			},
			want: "",
			wantErr: true,
		},
		{
			// Test case with calling GetProcessingOutput with pipelineId which contains incorrect run output.
			// As a result, want to receive an error.
			name: "get run output with incorrect run output",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: incorrectConvertPipelineId,
				subKey: cache.RunOutput,
				errorTitle: "",
			},
			want: "",
			wantErr: true,
		},
		{
			// Test case with calling GetProcessingOutput with pipelineId which contains run output.
			// As a result, want to receive an expected string.
			name: "get run output with correct pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: pipelineId,
				subKey: cache.RunOutput,
				errorTitle: "",
			},
			want: "MOCK_RUN_OUTPUT",
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetProcessingOutput(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.subKey, tt.args.errorTitle)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetProcessingOutput() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetProcessingOutput() got = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetProcessingStatus checks GetProcessingStatus against three cache
// states: a missing pipeline id, a value of the wrong type (not convertible
// to pb.Status), and a correctly stored status.
func TestGetProcessingStatus(t *testing.T) {
	defer goleak.VerifyNone(t, opt)
	pipelineId := uuid.New()
	incorrectConvertPipelineId := uuid.New()
	// Seed a valid status and a string value that should fail conversion to
	// pb.Status when read back.
	err := cacheService.SetValue(context.Background(), pipelineId, cache.Status, pb.Status_STATUS_FINISHED)
	if err != nil {
		panic(err)
	}
	err = cacheService.SetValue(context.Background(), incorrectConvertPipelineId, cache.Status, "MOCK_STATUS")
	if err != nil {
		panic(err)
	}
	type args struct {
		ctx context.Context
		cacheService cache.Cache
		key uuid.UUID
		errorTitle string
	}
	tests := []struct {
		name string
		args args
		want pb.Status
		wantErr bool
	}{
		{
			// Test case with calling GetProcessingStatus with pipelineId which doesn't contain status.
			// As a result, want to receive an error.
			name: "get status with incorrect pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: uuid.New(),
				errorTitle: "",
			},
			want: pb.Status_STATUS_UNSPECIFIED,
			wantErr: true,
		},
		{
			// Test case with calling GetProcessingStatus with pipelineId which contains incorrect status value in cache.
			// As a result, want to receive an error.
			name: "get status with incorrect cache value",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: incorrectConvertPipelineId,
				errorTitle: "",
			},
			want: pb.Status_STATUS_UNSPECIFIED,
			wantErr: true,
		},
		{
			// Test case with calling GetProcessingStatus with pipelineId which contains status.
			// As a result, want to receive an expected status.
			name: "get status with correct pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: pipelineId,
				errorTitle: "",
			},
			want: pb.Status_STATUS_FINISHED,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetProcessingStatus(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.errorTitle)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetProcessingStatus() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetProcessingStatus() got = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetLastIndex checks GetLastIndex against three cache states: a missing
// pipeline id, a value of the wrong type (not convertible to int), and a
// correctly stored last index.
func TestGetLastIndex(t *testing.T) {
	defer goleak.VerifyNone(t, opt)
	pipelineId := uuid.New()
	incorrectConvertPipelineId := uuid.New()
	err := cacheService.SetValue(context.Background(), pipelineId, cache.RunOutputIndex, 2)
	if err != nil {
		panic(err)
	}
	// Seed a non-int value so the "incorrect cache value" case actually
	// exercises the conversion failure (previously this key was never set,
	// so that case only re-tested the missing-key path).
	err = cacheService.SetValue(context.Background(), incorrectConvertPipelineId, cache.RunOutputIndex, "MOCK_LAST_INDEX")
	if err != nil {
		panic(err)
	}
	type args struct {
		ctx context.Context
		cacheService cache.Cache
		key uuid.UUID
		subKey cache.SubKey
		errorTitle string
	}
	tests := []struct {
		name string
		args args
		want int
		wantErr bool
	}{
		{
			// Test case with calling GetLastIndex with pipelineId which doesn't contain last index.
			// As a result, want to receive an error.
			name: "get last index with incorrect pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: uuid.New(),
				subKey: cache.RunOutputIndex,
				errorTitle: "",
			},
			want: 0,
			wantErr: true,
		},
		{
			// Test case with calling GetLastIndex with pipelineId which contains incorrect status value in cache.
			// As a result, want to receive an error.
			name: "get last index with incorrect cache value",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: incorrectConvertPipelineId,
				subKey: cache.RunOutputIndex,
				errorTitle: "",
			},
			want: 0,
			wantErr: true,
		},
		{
			// Test case with calling GetLastIndex with pipelineId which contains last index.
			// As a result, want to receive an expected last index.
			name: "get last index with correct pipelineId",
			args: args{
				ctx: context.Background(),
				cacheService: cacheService,
				key: pipelineId,
				subKey: cache.RunOutputIndex,
				errorTitle: "",
			},
			want: 2,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetLastIndex(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.subKey, tt.args.errorTitle)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetLastIndex() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetLastIndex() got = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_setJavaExecutableFile verifies that setJavaExecutableFile returns an
// executor builder configured with the expected executable file name while
// keeping the runner command that was already set.
func Test_setJavaExecutableFile(t *testing.T) {
	pipelineId := uuid.New()
	lc, _ := fs_tool.NewLifeCycle(pb.Sdk_SDK_JAVA, pipelineId, filepath.Join(os.Getenv("APP_WORK_DIR"), pipelinesFolder))
	lc.Paths.ExecutableName = fakeExecutableName

	// Builder pre-populated with a runner command, as production code would do.
	builder := executors.NewExecutorBuilder().WithRunner().WithCommand("fake cmd").ExecutorBuilder

	// The expected executor carries the file name on top of the original command.
	expected := executors.NewExecutorBuilder().
		WithExecutableFileName(fileName).
		WithRunner().
		WithCommand("fake cmd").
		WithTestRunner().
		Build()

	t.Run("set executable name to runner", func(t *testing.T) {
		got, err := setJavaExecutableFile(lc.Paths, pipelineId, cacheService, context.Background(), &builder, pipelinesFolder)
		if err != nil {
			t.Errorf("setJavaExecutableFile() error = %v, wantErr %v", err, false)
		}
		if !reflect.DeepEqual(got, expected) {
			t.Errorf("setJavaExecutableFile() = %v, want %v", got, expected)
		}
	})
}
func Test_getRunOrTestCmd(t *testing.T) {
unitTests := sync.Map{}
unitTests.Store(validators.UnitTestValidatorName, true)
notUnitTests := sync.Map{}
notUnitTests.Store(validators.UnitTestValidatorName, false)
runEx := executors.NewExecutorBuilder().
WithRunner().
WithCommand("runCommand").
WithArgs([]string{"arg1"}).
WithPipelineOptions([]string{""}).
Build()
testEx := executors.NewExecutorBuilder().
WithTestRunner().
WithCommand("testCommand").
WithArgs([]string{"arg1"}).
Build()
wantRunExec := exec.CommandContext(context.Background(), "runCommand", "arg1")
wantTestExec := exec.CommandContext(context.Background(), "testCommand", "arg1", "")
type args struct {
valResult *sync.Map
executor *executors.Executor
ctxWithTimeout context.Context
}
tests := []struct {
name string
args args
want *exec.Cmd
}{
{
//Get cmd objects with set run executor
name: "get run cmd",
args: args{
valResult: ¬UnitTests,
executor: &runEx,
ctxWithTimeout: context.Background(),
},
want: wantRunExec,
},
{
//Get cmd objects with set test executor
name: "get test cmd",
args: args{
valResult: &unitTests,
executor: &testEx,
ctxWithTimeout: context.Background(),
},
want: wantTestExec,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getExecuteCmd(tt.args.valResult, tt.args.executor, tt.args.ctxWithTimeout); !reflect.DeepEqual(got, tt.want) {
t.Errorf("getExecuteCmd() = %v, want %v", got, tt.want)
}
})
}
}
// setupBenchmarks prepares the environment for the Process benchmarks: it
// writes the SDK-specific JSON config file, sets the environment variables
// that the environment package reads, and replaces the package-level cache
// with a fresh local one. It panics on any failure since benchmarks cannot
// run without this setup.
func setupBenchmarks(sdk pb.Sdk) {
	err := os.MkdirAll(configFolder, fs.ModePerm)
	if err != nil {
		panic(err)
	}
	filePath := filepath.Join(configFolder, sdk.String()+".json")
	switch sdk {
	case pb.Sdk_SDK_JAVA:
		err = os.WriteFile(filePath, []byte(javaConfig), 0600)
	case pb.Sdk_SDK_PYTHON:
		err = os.WriteFile(filePath, []byte(pythonConfig), 0600)
	case pb.Sdk_SDK_GO:
		err = os.WriteFile(filePath, []byte(goConfig), 0600)
	}
	if err != nil {
		panic(err)
	}
	// Fail fast if the environment cannot be configured; previously these
	// Setenv errors were silently ignored.
	for key, value := range map[string]string{
		"BEAM_SDK":         sdk.String(),
		"APP_WORK_DIR":     "",
		"PREPARED_MOD_DIR": "",
	} {
		if err := os.Setenv(key, value); err != nil {
			panic(err)
		}
	}
	cacheService = local.New(context.Background())
}
// teardownBenchmarks removes the folders created while running the benchmarks.
func teardownBenchmarks() {
	for _, dir := range []string{configFolder, pipelinesFolder} {
		if err := os.RemoveAll(dir); err != nil {
			panic(fmt.Errorf("error during test teardown: %s", err.Error()))
		}
	}
}
// prepareFiles creates the life-cycle folder structure and the source code
// file for a single benchmark iteration, returning the prepared life cycle.
// Any failure aborts the benchmark.
func prepareFiles(b *testing.B, pipelineId uuid.UUID, code string, sdk pb.Sdk) *fs_tool.LifeCycle {
	b.Helper() // report failures at the caller's line, not inside this helper
	lc, err := fs_tool.NewLifeCycle(sdk, pipelineId, pipelinesFolder)
	if err != nil {
		// Fixed typo in the message: was "initializse".
		b.Fatalf("error during initialize lc: %s", err.Error())
	}
	err = lc.CreateFolders()
	if err != nil {
		b.Fatalf("error during prepare folders: %s", err.Error())
	}
	err = lc.CreateSourceCodeFile(code)
	if err != nil {
		b.Fatalf("error during prepare source code file: %s", err.Error())
	}
	return lc
}
// Benchmark_ProcessJava measures Process for a minimal Java program.
// Per-iteration file preparation and cache setup are excluded from the
// measurement via StopTimer/StartTimer.
func Benchmark_ProcessJava(b *testing.B) {
	setupBenchmarks(pb.Sdk_SDK_JAVA)
	defer teardownBenchmarks()

	appEnvs, err := environment.GetApplicationEnvsFromOsEnvs()
	if err != nil {
		b.Fatalf("error during preparing appEnv: %s", err)
	}
	sdkEnvs, err := environment.ConfigureBeamEnvs(appEnvs.WorkingDir())
	if err != nil {
		b.Fatalf("error during preparing sdkEnv: %s", err)
	}

	code := "class HelloWorld {\n public static void main(String[] args) {\n System.out.println(\"Hello world!\");\n }\n}"
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		id := uuid.New()
		lc := prepareFiles(b, id, code, pb.Sdk_SDK_JAVA)
		if err = utils.SetToCache(ctx, cacheService, id, cache.Canceled, false); err != nil {
			b.Fatal("error during set cancel flag to cache")
		}
		b.StartTimer()
		Process(ctx, cacheService, lc, id, appEnvs, sdkEnvs, "")
	}
}
// Benchmark_ProcessPython measures Process for a minimal Python program.
// Per-iteration file preparation and cache setup are excluded from the
// measurement via StopTimer/StartTimer.
func Benchmark_ProcessPython(b *testing.B) {
	setupBenchmarks(pb.Sdk_SDK_PYTHON)
	defer teardownBenchmarks()

	appEnvs, err := environment.GetApplicationEnvsFromOsEnvs()
	if err != nil {
		b.Fatalf("error during preparing appEnv: %s", err)
	}
	sdkEnvs, err := environment.ConfigureBeamEnvs(appEnvs.WorkingDir())
	if err != nil {
		b.Fatalf("error during preparing sdkEnv: %s", err)
	}

	code := "if __name__ == \"__main__\":\n print(\"Hello world!\")\n"
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		id := uuid.New()
		lc := prepareFiles(b, id, code, pb.Sdk_SDK_PYTHON)
		if err = utils.SetToCache(ctx, cacheService, id, cache.Canceled, false); err != nil {
			b.Fatal("error during set cancel flag to cache")
		}
		b.StartTimer()
		Process(ctx, cacheService, lc, id, appEnvs, sdkEnvs, "")
	}
}
// Benchmark_ProcessGo measures Process for a minimal Go program.
// Per-iteration file preparation and cache setup are excluded from the
// measurement via StopTimer/StartTimer.
func Benchmark_ProcessGo(b *testing.B) {
	setupBenchmarks(pb.Sdk_SDK_GO)
	defer teardownBenchmarks()

	appEnvs, err := environment.GetApplicationEnvsFromOsEnvs()
	if err != nil {
		b.Fatalf("error during preparing appEnv: %s", err)
	}
	sdkEnvs, err := environment.ConfigureBeamEnvs(appEnvs.WorkingDir())
	if err != nil {
		b.Fatalf("error during preparing sdkEnv: %s", err)
	}

	code := "package main\n\nimport \"fmt\"\n\nfunc main() {\n fmt.Println(\"Hello world!\")\n}"
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		id := uuid.New()
		lc := prepareFiles(b, id, code, pb.Sdk_SDK_GO)
		if err = utils.SetToCache(ctx, cacheService, id, cache.Canceled, false); err != nil {
			b.Fatal("error during set cancel flag to cache")
		}
		b.StartTimer()
		Process(ctx, cacheService, lc, id, appEnvs, sdkEnvs, "")
	}
}
// Benchmark_GetProcessingOutput measures reading a run-output value from the cache.
func Benchmark_GetProcessingOutput(b *testing.B) {
	ctx := context.Background()
	pipelineId := uuid.New()
	if err := cacheService.SetValue(ctx, pipelineId, cache.RunOutput, "MOCK_RUN_OUTPUT"); err != nil {
		b.Fatalf("error during prepare cache value: %s", err.Error())
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = GetProcessingOutput(ctx, cacheService, pipelineId, cache.RunOutput, "")
	}
}
// Benchmark_GetProcessingStatus measures reading a pipeline status from the cache.
func Benchmark_GetProcessingStatus(b *testing.B) {
	ctx := context.Background()
	pipelineId := uuid.New()
	if err := cacheService.SetValue(ctx, pipelineId, cache.Status, pb.Status_STATUS_FINISHED); err != nil {
		b.Fatalf("error during prepare cache value: %s", err.Error())
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = GetProcessingStatus(ctx, cacheService, pipelineId, "")
	}
}
// Benchmark_GetLastIndex measures reading the last run-output index from the cache.
func Benchmark_GetLastIndex(b *testing.B) {
	ctx := context.Background()
	pipelineId := uuid.New()
	if err := cacheService.SetValue(ctx, pipelineId, cache.RunOutputIndex, 5); err != nil {
		b.Fatalf("error during prepare cache value: %s", err.Error())
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = GetLastIndex(ctx, cacheService, pipelineId, cache.RunOutputIndex, "")
	}
}
| 31.072706 | 307 | 0.647718 | [
"func Test_Process(t *testing.T) {\n\tdefer goleak.VerifyNone(t, opt)\n\tappEnvs, err := environment.GetApplicationEnvsFromOsEnvs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsdkEnv, err := environment.ConfigureBeamEnvs(appEnvs.WorkingDir())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttype args struct {\n\t\tctx context.Context\n\t\tappEnv *environment.ApplicationEnvs\n\t\tsdkEnv *environment.BeamEnvs\n\t\tpipelineId uuid.UUID\n\t\tpipelineOptions string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\tcreateExecFile bool\n\t\tcode string\n\t\tcancelFunc bool\n\t\texpectedStatus pb.Status\n\t\texpectedRunOutput interface{}\n\t\texpectedRunError interface{}\n\t\texpectedCompileOutput interface{}\n\t\targs args\n\t}{\n\t\t{\n\t\t\t// Test case with calling processCode method with small timeout.\n\t\t\t// As a result status into cache should be set as Status_STATUS_RUN_TIMEOUT.\n\t\t\tname: \"small pipeline execution timeout\",\n\t\t\tcreateExecFile: false,\n\t\t\tcode: \"\",\n\t\t\tcancelFunc: false,\n\t\t\texpectedStatus: pb.Status_STATUS_RUN_TIMEOUT,\n\t\t\texpectedCompileOutput: nil,\n\t\t\texpectedRunOutput: nil,\n\t\t\texpectedRunError: nil,\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: &environment.ApplicationEnvs{},\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Test case with calling processCode method without preparing files with code.\n\t\t\t// As a result status into cache should be set as Status_STATUS_VALIDATION_ERROR.\n\t\t\tname: \"validation failed\",\n\t\t\tcreateExecFile: false,\n\t\t\tcode: \"\",\n\t\t\tcancelFunc: false,\n\t\t\texpectedStatus: pb.Status_STATUS_VALIDATION_ERROR,\n\t\t\texpectedCompileOutput: nil,\n\t\t\texpectedRunOutput: nil,\n\t\t\texpectedRunError: nil,\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: appEnvs,\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: 
\"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Test case with calling processCode method with incorrect code.\n\t\t\t// As a result status into cache should be set as Status_STATUS_COMPILE_ERROR.\n\t\t\tname: \"compilation failed\",\n\t\t\tcreateExecFile: true,\n\t\t\tcode: \"MOCK_CODE\",\n\t\t\tcancelFunc: false,\n\t\t\texpectedStatus: pb.Status_STATUS_COMPILE_ERROR,\n\t\t\texpectedCompileOutput: \"error: exit status 1\\noutput: %s:1: error: reached end of file while parsing\\nMOCK_CODE\\n^\\n1 error\\n\",\n\t\t\texpectedRunOutput: nil,\n\t\t\texpectedRunError: nil,\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: appEnvs,\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Test case with calling processCode method with incorrect logic into code.\n\t\t\t// As a result status into cache should be set as Status_STATUS_RUN_ERROR.\n\t\t\tname: \"run failed\",\n\t\t\tcreateExecFile: true,\n\t\t\tcode: \"class HelloWorld {\\n public static void main(String[] args) {\\n System.out.println(1/0);\\n }\\n}\",\n\t\t\tcancelFunc: false,\n\t\t\texpectedStatus: pb.Status_STATUS_RUN_ERROR,\n\t\t\texpectedCompileOutput: \"\",\n\t\t\texpectedRunOutput: \"\",\n\t\t\texpectedRunError: \"error: exit status 1\\noutput: Exception in thread \\\"main\\\" java.lang.ArithmeticException: / by zero\\n\\tat HelloWorld.main(%s.java:3)\\n\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: appEnvs,\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Test case with calling processCode with canceling code processing.\n\t\t\t// As a result status into cache should be set as Status_STATUS_CANCELED.\n\t\t\tname: \"cancel\",\n\t\t\tcreateExecFile: true,\n\t\t\tcode: \"class HelloWorld {\\n public static void main(String[] args) {\\n while(true){}\\n }\\n}\",\n\t\t\tcancelFunc: true,\n\t\t\texpectedStatus: 
pb.Status_STATUS_CANCELED,\n\t\t\texpectedCompileOutput: \"\",\n\t\t\texpectedRunOutput: \"\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: appEnvs,\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Test case with calling processCode without any error cases.\n\t\t\t// As a result status into cache should be set as Status_STATUS_FINISHED.\n\t\t\tname: \"processing complete successfully\",\n\t\t\tcreateExecFile: true,\n\t\t\tcancelFunc: false,\n\t\t\tcode: \"class HelloWorld {\\n public static void main(String[] args) {\\n System.out.println(\\\"Hello world!\\\");\\n }\\n}\",\n\t\t\texpectedStatus: pb.Status_STATUS_FINISHED,\n\t\t\texpectedCompileOutput: \"\",\n\t\t\texpectedRunOutput: \"Hello world!\\n\",\n\t\t\texpectedRunError: nil,\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tappEnv: appEnvs,\n\t\t\t\tsdkEnv: sdkEnv,\n\t\t\t\tpipelineId: uuid.New(),\n\t\t\t\tpipelineOptions: \"\",\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlc, _ := fs_tool.NewLifeCycle(pb.Sdk_SDK_JAVA, tt.args.pipelineId, filepath.Join(os.Getenv(\"APP_WORK_DIR\"), pipelinesFolder))\n\t\t\terr := lc.CreateFolders()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error during prepare folders: %s\", err.Error())\n\t\t\t}\n\t\t\tif tt.createExecFile {\n\t\t\t\t_ = lc.CreateSourceCodeFile(tt.code)\n\t\t\t}\n\t\t\tif err = utils.SetToCache(tt.args.ctx, cacheService, tt.args.pipelineId, cache.Canceled, false); err != nil {\n\t\t\t\tt.Fatal(\"error during set cancel flag to cache\")\n\t\t\t}\n\t\t\tif tt.cancelFunc {\n\t\t\t\tgo func(ctx context.Context, pipelineId uuid.UUID) {\n\t\t\t\t\t// to imitate behavior of cancellation\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t\tcacheService.SetValue(ctx, pipelineId, cache.Canceled, true)\n\t\t\t\t}(tt.args.ctx, tt.args.pipelineId)\n\t\t\t}\n\t\t\tProcess(tt.args.ctx, cacheService, lc, 
tt.args.pipelineId, tt.args.appEnv, tt.args.sdkEnv, tt.args.pipelineOptions)\n\n\t\t\tstatus, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.Status)\n\t\t\tif !reflect.DeepEqual(status, tt.expectedStatus) {\n\t\t\t\tt.Errorf(\"processCode() set status: %s, but expectes: %s\", status, tt.expectedStatus)\n\t\t\t}\n\n\t\t\tcompileOutput, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.CompileOutput)\n\t\t\tif tt.expectedCompileOutput != nil && strings.Contains(tt.expectedCompileOutput.(string), \"%s\") {\n\t\t\t\ttt.expectedCompileOutput = fmt.Sprintf(tt.expectedCompileOutput.(string), lc.Paths.AbsoluteSourceFilePath)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(compileOutput, tt.expectedCompileOutput) {\n\t\t\t\tt.Errorf(\"processCode() set compileOutput: %s, but expectes: %s\", compileOutput, tt.expectedCompileOutput)\n\t\t\t}\n\n\t\t\trunOutput, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.RunOutput)\n\t\t\tif !reflect.DeepEqual(runOutput, tt.expectedRunOutput) {\n\t\t\t\tt.Errorf(\"processCode() set runOutput: %s, but expectes: %s\", runOutput, tt.expectedRunOutput)\n\t\t\t}\n\n\t\t\trunError, _ := cacheService.GetValue(tt.args.ctx, tt.args.pipelineId, cache.RunError)\n\t\t\tif tt.expectedRunError != nil && strings.Contains(tt.expectedRunError.(string), \"%s\") {\n\t\t\t\ttt.expectedRunError = fmt.Sprintf(tt.expectedRunError.(string), tt.args.pipelineId)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(runError, tt.expectedRunError) {\n\t\t\t\tt.Errorf(\"processCode() set runError: %s, but expectes: %s\", runError, tt.expectedRunError)\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestGetProcessingOutput(t *testing.T) {\n\tdefer goleak.VerifyNone(t, opt)\n\tpipelineId := uuid.New()\n\tincorrectConvertPipelineId := uuid.New()\n\terr := cacheService.SetValue(context.Background(), pipelineId, cache.RunOutput, \"MOCK_RUN_OUTPUT\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = cacheService.SetValue(context.Background(), incorrectConvertPipelineId, cache.RunOutput, cache.RunOutput)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttype args struct {\n\t\tctx context.Context\n\t\tcacheService cache.Cache\n\t\tkey uuid.UUID\n\t\tsubKey cache.SubKey\n\t\terrorTitle string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\t// Test case with calling GetProcessingOutput with pipelineId which doesn't contain run output.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get run output with incorrect pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: uuid.New(),\n\t\t\t\tsubKey: cache.RunOutput,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetProcessingOutput with pipelineId which contains incorrect run output.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get run output with incorrect run output\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: incorrectConvertPipelineId,\n\t\t\t\tsubKey: cache.RunOutput,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: \"\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetProcessingOutput with pipelineId which contains run output.\n\t\t\t// As a result, want to receive an expected string.\n\t\t\tname: \"get run output with correct pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: pipelineId,\n\t\t\t\tsubKey: 
cache.RunOutput,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: \"MOCK_RUN_OUTPUT\",\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := GetProcessingOutput(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.subKey, tt.args.errorTitle)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"GetProcessingOutput() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"GetProcessingOutput() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestGetProcessingStatus(t *testing.T) {\n\tdefer goleak.VerifyNone(t, opt)\n\tpipelineId := uuid.New()\n\tincorrectConvertPipelineId := uuid.New()\n\terr := cacheService.SetValue(context.Background(), pipelineId, cache.Status, pb.Status_STATUS_FINISHED)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = cacheService.SetValue(context.Background(), incorrectConvertPipelineId, cache.Status, \"MOCK_STATUS\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttype args struct {\n\t\tctx context.Context\n\t\tcacheService cache.Cache\n\t\tkey uuid.UUID\n\t\terrorTitle string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant pb.Status\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\t// Test case with calling GetProcessingStatus with pipelineId which doesn't contain status.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get status with incorrect pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: uuid.New(),\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: pb.Status_STATUS_UNSPECIFIED,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetProcessingStatus with pipelineId which contains incorrect status value in cache.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get status with incorrect cache value\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: incorrectConvertPipelineId,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: pb.Status_STATUS_UNSPECIFIED,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetProcessingStatus with pipelineId which contains status.\n\t\t\t// As a result, want to receive an expected status.\n\t\t\tname: \"get status with correct pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: pipelineId,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: 
pb.Status_STATUS_FINISHED,\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := GetProcessingStatus(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.errorTitle)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"GetProcessingStatus() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"GetProcessingStatus() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestGetLastIndex(t *testing.T) {\n\tdefer goleak.VerifyNone(t, opt)\n\tpipelineId := uuid.New()\n\tincorrectConvertPipelineId := uuid.New()\n\terr := cacheService.SetValue(context.Background(), pipelineId, cache.RunOutputIndex, 2)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttype args struct {\n\t\tctx context.Context\n\t\tcacheService cache.Cache\n\t\tkey uuid.UUID\n\t\tsubKey cache.SubKey\n\t\terrorTitle string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant int\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\t// Test case with calling GetLastIndex with pipelineId which doesn't contain last index.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get last index with incorrect pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: uuid.New(),\n\t\t\t\tsubKey: cache.RunOutputIndex,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetLastIndex with pipelineId which contains incorrect status value in cache.\n\t\t\t// As a result, want to receive an error.\n\t\t\tname: \"get last index with incorrect cache value\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: incorrectConvertPipelineId,\n\t\t\t\tsubKey: cache.RunOutputIndex,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\t// Test case with calling GetLastIndex with pipelineId which contains last index.\n\t\t\t// As a result, want to receive an expected last index.\n\t\t\tname: \"get last index with correct pipelineId\",\n\t\t\targs: args{\n\t\t\t\tctx: context.Background(),\n\t\t\t\tcacheService: cacheService,\n\t\t\t\tkey: pipelineId,\n\t\t\t\tsubKey: cache.RunOutputIndex,\n\t\t\t\terrorTitle: \"\",\n\t\t\t},\n\t\t\twant: 2,\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, 
err := GetLastIndex(tt.args.ctx, tt.args.cacheService, tt.args.key, tt.args.subKey, tt.args.errorTitle)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"GetLastIndex() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"GetLastIndex() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}",
"func Test_setJavaExecutableFile(t *testing.T) {\n\tpipelineId := uuid.New()\n\tlc, _ := fs_tool.NewLifeCycle(pb.Sdk_SDK_JAVA, pipelineId, filepath.Join(os.Getenv(\"APP_WORK_DIR\"), pipelinesFolder))\n\tlc.Paths.ExecutableName = fakeExecutableName\n\texecutorBuilder := executors.NewExecutorBuilder().WithRunner().WithCommand(\"fake cmd\").ExecutorBuilder\n\ttype args struct {\n\t\tlc *fs_tool.LifeCycle\n\t\tid uuid.UUID\n\t\tservice cache.Cache\n\t\tctx context.Context\n\t\texecutorBuilder *executors.ExecutorBuilder\n\t\tdir string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant executors.Executor\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"set executable name to runner\",\n\t\t\targs: args{\n\t\t\t\tlc: lc,\n\t\t\t\tid: pipelineId,\n\t\t\t\tservice: cacheService,\n\t\t\t\tctx: context.Background(),\n\t\t\t\texecutorBuilder: &executorBuilder,\n\t\t\t\tdir: pipelinesFolder,\n\t\t\t},\n\t\t\twant: executors.NewExecutorBuilder().\n\t\t\t\tWithExecutableFileName(fileName).\n\t\t\t\tWithRunner().\n\t\t\t\tWithCommand(\"fake cmd\").\n\t\t\t\tWithTestRunner().\n\t\t\t\tBuild(),\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := setJavaExecutableFile(tt.args.lc.Paths, tt.args.id, tt.args.service, tt.args.ctx, tt.args.executorBuilder, tt.args.dir)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"setJavaExecutableFile() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"setJavaExecutableFile() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}",
"func Test_getRunOrTestCmd(t *testing.T) {\n\tunitTests := sync.Map{}\n\tunitTests.Store(validators.UnitTestValidatorName, true)\n\n\tnotUnitTests := sync.Map{}\n\tnotUnitTests.Store(validators.UnitTestValidatorName, false)\n\n\trunEx := executors.NewExecutorBuilder().\n\t\tWithRunner().\n\t\tWithCommand(\"runCommand\").\n\t\tWithArgs([]string{\"arg1\"}).\n\t\tWithPipelineOptions([]string{\"\"}).\n\t\tBuild()\n\n\ttestEx := executors.NewExecutorBuilder().\n\t\tWithTestRunner().\n\t\tWithCommand(\"testCommand\").\n\t\tWithArgs([]string{\"arg1\"}).\n\t\tBuild()\n\n\twantRunExec := exec.CommandContext(context.Background(), \"runCommand\", \"arg1\")\n\twantTestExec := exec.CommandContext(context.Background(), \"testCommand\", \"arg1\", \"\")\n\n\ttype args struct {\n\t\tvalResult *sync.Map\n\t\texecutor *executors.Executor\n\t\tctxWithTimeout context.Context\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *exec.Cmd\n\t}{\n\t\t{\n\t\t\t//Get cmd objects with set run executor\n\t\t\tname: \"get run cmd\",\n\t\t\targs: args{\n\t\t\t\tvalResult: ¬UnitTests,\n\t\t\t\texecutor: &runEx,\n\t\t\t\tctxWithTimeout: context.Background(),\n\t\t\t},\n\t\t\twant: wantRunExec,\n\t\t},\n\t\t{\n\t\t\t//Get cmd objects with set test executor\n\t\t\tname: \"get test cmd\",\n\t\t\targs: args{\n\t\t\t\tvalResult: &unitTests,\n\t\t\t\texecutor: &testEx,\n\t\t\t\tctxWithTimeout: context.Background(),\n\t\t\t},\n\t\t\twant: wantTestExec,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := getExecuteCmd(tt.args.valResult, tt.args.executor, tt.args.ctxWithTimeout); !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getExecuteCmd() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}"
] |
End of preview. Expand
in Data Studio
No dataset card yet
- Downloads last month
- 4