From d14a74cc09c7f049eb4aa24ff88b8ec8c4aa7de5 Mon Sep 17 00:00:00 2001
From: Volker Schukai <volker.schukai@schukai.com>
Date: Wed, 18 Oct 2023 01:41:13 +0200
Subject: [PATCH] feat: implement core functions

---
 .config/demo_ssh_key           |  16 ++
 .config/demo_ssh_key.pub       |   1 +
 .config/sshd_config            |  14 ++
 .config/temp_ed25518_key       |   7 +
 .config/temp_ed25518_key.pub   |   1 +
 .config/temp_rsa_key           |  38 ++++
 .config/temp_rsa_key.pub       |   1 +
 .idea/go.imports.xml           |   6 +
 .mockery.yaml                  |   2 +
 cmd/main.go                    |  39 ----
 devenv.nix                     |  48 +++++
 error.go                       |  23 ---
 errors.go                      |  32 +++
 event-bus.go                   |  69 +++++++
 event-bus_test.go              |  91 +++++++++
 executor.go                    | 246 -----------------------
 executor_test.go               | 137 -------------
 go.mod                         |  46 ++++-
 go.sum                         | 125 +++++++++++-
 gob.go                         |  47 -----
 gob_test.go                    |  29 ---
 import.go                      | 177 +++++++++++++++++
 import_test.go                 | 124 ++++++++++++
 issue-1_test.go                | 135 -------------
 job-log.go                     |   6 +-
 job-run.go                     | 202 -------------------
 job-stat.go                    |  23 ---
 job-status.go                  |  17 --
 job.go                         | 252 ++++++++++++++----------
 job_test.go                    | 232 ++++++++++------------
 jobs.go                        | 240 -----------------------
 jobs_test.go                   |  44 -----
 json.go                        |  65 -------
 json_test.go                   |   1 -
 log-writer.go                  | 108 -----------
 manager.go                     | 331 +++++++++++++++++++++----------
 manager_test.go                | 196 +++++++++++++++++++
 manger_test.go                 | 158 ---------------
 prority.go                     |   9 -
 queue.go                       | 120 ++++++++++++
 queue_test.go                  | 220 +++++++++++++++++++++
 runnable-counter.go            |  38 ++++
 runnable-counter_test.go       |  24 +++
 runnable-dummy.go              |  13 ++
 runnable-dummy_test.go         |  16 ++
 runnable-fileoperation.go      |  99 ++++++++++
 runnable-fileoperation_test.go |  67 +++++++
 runnable-gorm.go               |  55 ++++++
 runnable-gorm_test.go          |  46 +++++
 runnable-http.go               |  53 +++++
 runnable-http_test.go          |  36 ++++
 runnable-mail.go               |  79 ++++++++
 runnable-mail_test.go          | 186 ++++++++++++++++++
 runnable-sftp.go               | 209 ++++++++++++++++++++
 runnable-sftp_test.go          | 296 ++++++++++++++++++++++++++++
 runnable-shell.go              |  52 +++++
 runnable-shell_test.go         |  46 +++++
 runnable.go                    |  91 ++-------
 runnable_test.go               | 135 +++----------
 scheduler.go                   | 344 +++++++++++++++++++++++++++++++++
 scheduler_test.go              | 257 ++++++++++++++++++++++++
 topological-sort.go            |  80 ++++----
 topological-sort_test.go       | 172 +++++++++--------
 worker.go                      | 179 +++++++++++++++++
 worker_test.go                 | 204 +++++++++++++++++++
 65 files changed, 4284 insertions(+), 2171 deletions(-)
 create mode 100644 .config/demo_ssh_key
 create mode 100644 .config/demo_ssh_key.pub
 create mode 100644 .config/sshd_config
 create mode 100755 .config/temp_ed25518_key
 create mode 100644 .config/temp_ed25518_key.pub
 create mode 100755 .config/temp_rsa_key
 create mode 100644 .config/temp_rsa_key.pub
 create mode 100644 .idea/go.imports.xml
 create mode 100644 .mockery.yaml
 delete mode 100644 cmd/main.go
 delete mode 100644 error.go
 create mode 100644 errors.go
 create mode 100644 event-bus.go
 create mode 100644 event-bus_test.go
 delete mode 100644 executor.go
 delete mode 100644 executor_test.go
 delete mode 100644 gob.go
 delete mode 100644 gob_test.go
 create mode 100644 import.go
 create mode 100644 import_test.go
 delete mode 100644 issue-1_test.go
 delete mode 100644 job-run.go
 delete mode 100644 job-status.go
 delete mode 100644 jobs.go
 delete mode 100644 jobs_test.go
 delete mode 100644 json.go
 delete mode 100644 json_test.go
 delete mode 100644 log-writer.go
 create mode 100644 manager_test.go
 delete mode 100644 manger_test.go
 delete mode 100644 prority.go
 create mode 100644 queue.go
 create mode 100644 queue_test.go
 create mode 100644 runnable-counter.go
 create mode 100644 runnable-counter_test.go
 create mode 100644 runnable-dummy.go
 create mode 100644 runnable-dummy_test.go
 create mode 100644 runnable-fileoperation.go
 create mode 100644 runnable-fileoperation_test.go
 create mode 100644 runnable-gorm.go
 create mode 100644 runnable-gorm_test.go
 create mode 100644 runnable-http.go
 create mode 100644 runnable-http_test.go
 create mode 100644 runnable-mail.go
 create mode 100644 runnable-mail_test.go
 create mode 100644 runnable-sftp.go
 create mode 100644 runnable-sftp_test.go
 create mode 100644 runnable-shell.go
 create mode 100644 runnable-shell_test.go
 create mode 100644 scheduler.go
 create mode 100644 scheduler_test.go
 create mode 100644 worker.go
 create mode 100644 worker_test.go

diff --git a/.config/demo_ssh_key b/.config/demo_ssh_key
new file mode 100644
index 0000000..df84a93
--- /dev/null
+++ b/.config/demo_ssh_key
@@ -0,0 +1,16 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAIEA7Ct96yDKZ1toOjMigoUV+bmH/z7kSWwtyLgjEQzN0dDU88Lo0ebK
+Dj2fHbo1GZnAJg77qDqIGzWMiY8oOOfiZXpj8sPvDmVoSWyAwKZhzIHigiNY9y9nW38y28
+lUC/sRBe9NTRKKEyq2Nu3QAQ60kzI5cKuZzKslU7Cz+9f8iRsAAAIAKzE8bysxPG8AAAAH
+c3NoLXJzYQAAAIEA7Ct96yDKZ1toOjMigoUV+bmH/z7kSWwtyLgjEQzN0dDU88Lo0ebKDj
+2fHbo1GZnAJg77qDqIGzWMiY8oOOfiZXpj8sPvDmVoSWyAwKZhzIHigiNY9y9nW38y28lU
+C/sRBe9NTRKKEyq2Nu3QAQ60kzI5cKuZzKslU7Cz+9f8iRsAAAADAQABAAAAgB2FUjgR4T
+sKMf0UZzvpZtXQWy+MrPyAiTiNy9RVxWR0tinCM1gJgThGe507qWeJ0HrxZIOizWCZUPfg
+7SYv1hybtv7uHldg1FUAUZldtOPtyVwvWMMP0lLhYkfkxLHHx2ZzL3qnG//jvx4gIWcbxJ
+9C2+bDoxc/NSeo57jFHwABAAAAQCiaEiu5AAi1ZpotYpCEZXR+CgiRTDtwSOB4eM0d4rgx
+mfVdTtkQqLKhGVF6wryEwEQ6Jhz9KqE89L3HO+R3LxgAAABBAPdAheySXhXIj9/PRMW2h3
+a6f5DR2D2WqRdvOwIrVJli4+SzoPlDBg8ezOAQsIcz5dMqfLhB/Nd0KCXZ1Gi6+ZsAAABB
+APSGl65yhFZCCud8rmY5N/W9sszfi60lnMoaVo7EAv3ksDXqPWSR1R2fWxPDFp4i1SM1pv
+cI+MFN9JpfGgiDZoEAAAALdnNAYTQyMjlkNzU=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/.config/demo_ssh_key.pub b/.config/demo_ssh_key.pub
new file mode 100644
index 0000000..789af9f
--- /dev/null
+++ b/.config/demo_ssh_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDsK33rIMpnW2g6MyKChRX5uYf/PuRJbC3IuCMRDM3R0NTzwujR5soOPZ8dujUZmcAmDvuoOogbNYyJjyg45+JlemPyw+8OZWhJbIDApmHMgeKCI1j3L2dbfzLbyVQL+xEF701NEooTKrY27dABDrSTMjlwq5nMqyVTsLP71/yJGw== vs@a4229d75
diff --git a/.config/sshd_config b/.config/sshd_config
new file mode 100644
index 0000000..3a3eba6
--- /dev/null
+++ b/.config/sshd_config
@@ -0,0 +1,14 @@
+
+AuthorizedKeysCommand /nix/store/w8vm09hri2zz7yacryzzzxvsapik4ps4-coreutils-9.1/bin/cat /home/vs/workspaces/oss/go-libs/job-queues/.devenv/chroot/home/demo/.ssh/authorized_keys
+AuthorizedKeysCommandUser nobody
+
+Match User root
+    ChrootDirectory /home/vs/workspaces/oss/go-libs/job-queues/.devenv/chroot
+    ForceCommand internal-sftp
+    PasswordAuthentication no
+    PermitTunnel no
+    AllowAgentForwarding no
+    AllowTcpForwarding no
+    X11Forwarding no
+    PermitRootLogin no
+    AllowUsers demo root
diff --git a/.config/temp_ed25518_key b/.config/temp_ed25518_key
new file mode 100755
index 0000000..dbe53f1
--- /dev/null
+++ b/.config/temp_ed25518_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBxagk6nB2R86bvLZ7aNKHsmMAVeU67fIo2Wdp+EymHTgAAAJBntrIpZ7ay
+KQAAAAtzc2gtZWQyNTUxOQAAACBxagk6nB2R86bvLZ7aNKHsmMAVeU67fIo2Wdp+EymHTg
+AAAEBPmQk0dpM/BVsTi3hfROLxOgq/bzv4Sef0PNoK9Ra2NnFqCTqcHZHzpu8tnto0oeyY
+wBV5Trt8ijZZ2n4TKYdOAAAAC3ZzQGE0MjI5ZDc1AQI=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/.config/temp_ed25518_key.pub b/.config/temp_ed25518_key.pub
new file mode 100644
index 0000000..42c680f
--- /dev/null
+++ b/.config/temp_ed25518_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHFqCTqcHZHzpu8tnto0oeyYwBV5Trt8ijZZ2n4TKYdO vs@a4229d75
diff --git a/.config/temp_rsa_key b/.config/temp_rsa_key
new file mode 100755
index 0000000..045d57e
--- /dev/null
+++ b/.config/temp_rsa_key
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAuTp14MpandyyftGinDQEIs7Pr88TeogFMSdoZm5AUbUliK9M6M7n
+QHQctTeMW0TelHlwVrZCwXcQn5sCpRaWE1AtEjPtZ1r6pD2HlpAkQfxMtP9TNJPwHpJ/1V
+uofvmu4adNxN6mZw6CH34vNJGgUuFNXwq6XtbrwM4TIacEu718oSf0oNOCECijZSsbSoBn
+ljsIbCcdqXClNs0pHq2lVPj0vdzGg4PQLkcRjLPoDgiQfwOxPmL/UCBraXHoLXIyz4sNfs
+4ZQXjfLYfwgHiV145oo/kD4dgmqCfo4HOipOQlvIYs4iRHyjyvQqiirTs2h3G8ZXPBelxW
+DxEsKLfNX4XbNM6yPvYLoZadSbV2gJC/O9Wv0DLJbhucjq9X5gMMVsjgccctfnvwEZPU3s
+lZLU4ntENh5jIW8w5ONHavOX23+b2MrCfP9OHB6gnNAuo7wPfWPctapcdhQ5kWUBH+G+bU
+Epa6EAyDke9nhIy8eu+T12MrmccQFZCGKBpuR9mLAAAFgHop0+x6KdPsAAAAB3NzaC1yc2
+EAAAGBALk6deDKWp3csn7Ropw0BCLOz6/PE3qIBTEnaGZuQFG1JYivTOjO50B0HLU3jFtE
+3pR5cFa2QsF3EJ+bAqUWlhNQLRIz7Wda+qQ9h5aQJEH8TLT/UzST8B6Sf9VbqH75ruGnTc
+TepmcOgh9+LzSRoFLhTV8Kul7W68DOEyGnBLu9fKEn9KDTghAoo2UrG0qAZ5Y7CGwnHalw
+pTbNKR6tpVT49L3cxoOD0C5HEYyz6A4IkH8DsT5i/1Aga2lx6C1yMs+LDX7OGUF43y2H8I
+B4ldeOaKP5A+HYJqgn6OBzoqTkJbyGLOIkR8o8r0Kooq07NodxvGVzwXpcVg8RLCi3zV+F
+2zTOsj72C6GWnUm1doCQvzvVr9AyyW4bnI6vV+YDDFbI4HHHLX578BGT1N7JWS1OJ7RDYe
+YyFvMOTjR2rzl9t/m9jKwnz/ThweoJzQLqO8D31j3LWqXHYUOZFlAR/hvm1BKWuhAMg5Hv
+Z4SMvHrvk9djK5nHEBWQhigabkfZiwAAAAMBAAEAAAGACZTKkCwnb88AtKqnXKkcJajI+X
+EestHiYt6E+fduAEv8ewuGUOyS7bST35PWAMqZMNjvRKz82wELrRXZlyyJ6snU6wIgn54H
+JQPBxboDBh7E4P/sD+YvH4W4cxLgrmC/+RAsZ6iUtvP7KyHd0zxNLLxWwjnN1iYL39dJCg
+6XohZ1IejtMHyAV9GiUhNewAXQVecL0nxbfm5DPYhzPhEBYlnWXxPSeO8Sj7JBaCPONxue
+TdOlEXMRfdWPeM7ZkbA8dg4Khl3uAR4hFKhW6QfsI/uBlgKdeuDKzcKsAAQHfa7dmqIO7Q
+dsOa41DnyQEw1X9H6p9CY+G+Lc+jub4EKgIuYNGMA2+QLSujsooq4QaI6rjiyb/Hr1xPjb
+E4GOzMmM+F/6PjswhGFImZ32mYBzhBlZxaS7x+01mAG4PLiy/sOfzh1sLjTnIgxCuo2HU1
+S70VvBTOSmfCHu8x1K583fkt7iaMoN3vTDookkD758D+MqH9fdCRLbuz6LBuy3D+dxAAAA
+wF0TZH/yMLYLY8luQ2AUTTV3GNHF2ObWJP34HVi/0AY2Yp8ywMk0K5Tazc3F1X91KDWjd2
+3JPrP8m7ESJYb6WA/vJEFXO1g4EEP8gwXrBWIKa69Dj8Q36MhQQk4ClBZKFCjVMFgbW6bs
+Jm5Ft8rjoRG1ULgaHum60yJsI0e2qMZNV0Pk/KL+IhLARSYcgtYHxXt7UgQCEea0F7rKeS
+yU2Jwx9bNd62w+qTfsKEpguBIOE69J9OsXTRLyvOT4SRaarAAAAMEA5Hba3sZZ0hz8H2bt
+fxAd88SNjNCXtOxD8a4A2Y65ku5DVw9EcttZadw8YUfKOr6YPZlBGd19tv27us3Re9C67m
+VGjobdUbCWtAZ2orre7XIBVrPmFXDWJ60ABU9/O2UhABwtx4X1XfKySPETJ4ZNpZ1AMRpA
+2S0RR2chTbBVTnDQ3otHx9cP6IQhTwMLqfujeZXwFUJI/5v/5yKj1fYUDHc1elNUXugCbK
+2qeLmLQgry0XsmfPsuKRbxduSbVchXAAAAwQDPjZWFQ2ColxOg00M96MHnI0y0EL+8FGLl
+yfi9VtN+hqCVhK9p0kalqwDMad112nwELEVQtQ5JtwrqpasI1/BYMVh6YdSIPvtrFGPVJF
+iMF1UqcYVhbQ0bVzKiszFyjq7uZAEvqMVf/BpfxtZL8xJvnIZSysb8VM2yWa1lkkQ1w8KT
+efy58cz3Ha6XFIImojyYXjl6s2etsIJbsHEvl1rTSl5uPcJZfu2jw24j8ux5cT1D+0koSG
+xpqPwJ4sHVB+0AAAALdnNAYTQyMjlkNzU=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/.config/temp_rsa_key.pub b/.config/temp_rsa_key.pub
new file mode 100644
index 0000000..24908e3
--- /dev/null
+++ b/.config/temp_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC5OnXgylqd3LJ+0aKcNAQizs+vzxN6iAUxJ2hmbkBRtSWIr0zozudAdBy1N4xbRN6UeXBWtkLBdxCfmwKlFpYTUC0SM+1nWvqkPYeWkCRB/Ey0/1M0k/Aekn/VW6h++a7hp03E3qZnDoIffi80kaBS4U1fCrpe1uvAzhMhpwS7vXyhJ/Sg04IQKKNlKxtKgGeWOwhsJx2pcKU2zSkeraVU+PS93MaDg9AuRxGMs+gOCJB/A7E+Yv9QIGtpcegtcjLPiw1+zhlBeN8th/CAeJXXjmij+QPh2CaoJ+jgc6Kk5CW8hiziJEfKPK9CqKKtOzaHcbxlc8F6XFYPESwot81fhds0zrI+9guhlp1JtXaAkL871a/QMsluG5yOr1fmAwxWyOBxxy1+e/ARk9TeyVktTie0Q2HmMhbzDk40dq85fbf5vYysJ8/04cHqCc0C6jvA99Y9y1qlx2FDmRZQEf4b5tQSlroQDIOR72eEjLx675PXYyuZxxAVkIYoGm5H2Ys= vs@a4229d75
diff --git a/.idea/go.imports.xml b/.idea/go.imports.xml
new file mode 100644
index 0000000..be62eaf
--- /dev/null
+++ b/.idea/go.imports.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="GoImports">
+    <option name="optimizeImportsOnTheFly" value="false" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/.mockery.yaml b/.mockery.yaml
new file mode 100644
index 0000000..57ff361
--- /dev/null
+++ b/.mockery.yaml
@@ -0,0 +1,2 @@
+name: "SFTPServerInterface"
+output: "./mocks"
\ No newline at end of file
diff --git a/cmd/main.go b/cmd/main.go
deleted file mode 100644
index 4ee4766..0000000
--- a/cmd/main.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package main
-
-import (
-	"fmt"
-	jobqueue "gitlab.schukai.com/oss/libraries/go/services/job-queues.git"
-)
-
-func main() {
-	jq := jobqueue.NewJobs()
-	if jq == nil {
-		panic("NewJobs returned nil")
-	}
-
-	// Hinzufügen eines neuen Jobs
-	err := jq.AddJob(jobqueue.JobSpecification{
-		Id:       "test",
-		Priority: 1,
-	}, &jobqueue.ExternalProcessRunner{
-		Command: "sleep",
-		Args:    []string{"1"},
-	})
-	if err != nil {
-		panic(err)
-	}
-
-	// Abrufen aller Jobs
-	allJobs := jq.GetJobs()
-	fmt.Println("Alle Jobs:", allJobs)
-
-	// Entfernen eines Jobs
-	removed, err := jq.RemoveJob("test")
-	if err != nil {
-		panic(err)
-	}
-	if removed {
-		fmt.Println("Job wurde entfernt.")
-	}
-
-}
diff --git a/devenv.nix b/devenv.nix
index f0ce8c2..2185cd4 100644
--- a/devenv.nix
+++ b/devenv.nix
@@ -43,6 +43,7 @@
     zlib
     nodePackages.mermaid-cli
     feh
+    openssh
     
   ];
 
@@ -52,6 +53,52 @@
 
   difftastic.enable = true;
 
+  scripts.run-sshd.exec = ''
+  set -x
+  
+  cd ${config.devenv.root}/docker/sftp-server
+  ${pkgs.docker-client}/bin/docker docker build -t jobqueue-sftp-server .
+  cd -
+  
+  
+  
+  ${pkgs.coreutils}/bin/chmod 700 ${config.devenv.root}/.config/temp_rsa_key
+  ${pkgs.coreutils}/bin/chmod 700 ${config.devenv.root}/.config/temp_ed25518_key
+  
+  ${pkgs.coreutils}/bin/mkdir -p ${config.devenv.root}/.devenv/chroot/home/demo/.ssh 
+  ${pkgs.coreutils}/bin/cat ${config.devenv.root}/.config/demo_ssh_key.pub > ${config.devenv.root}/.devenv/chroot/home/demo/.ssh/authorized_keys
+  ${pkgs.coreutils}/bin/chmod 700 ${config.devenv.root}/.devenv/chroot/home/demo/.ssh
+  ${pkgs.coreutils}/bin/chmod 600 ${config.devenv.root}/.devenv/chroot/home/demo/.ssh/authorized_keys
+  
+  
+  ${pkgs.coreutils}/bin/cat <<EOF > ${config.devenv.root}/.config/sshd_config
+
+AuthorizedKeysCommand ${pkgs.coreutils}/bin/cat ${config.devenv.root}/.devenv/chroot/home/demo/.ssh/authorized_keys
+AuthorizedKeysCommandUser nobody
+
+Match User root
+    ChrootDirectory ${config.devenv.root}/.devenv/chroot
+    ForceCommand internal-sftp
+    PasswordAuthentication no
+    PermitTunnel no
+    AllowAgentForwarding no
+    AllowTcpForwarding no
+    X11Forwarding no
+    PermitRootLogin no
+    AllowUsers demo root
+EOF
+ 
+  
+  ${pkgs.openssh}/bin/sshd -D -e -o \
+     HostKey=${config.devenv.root}/.config/temp_rsa_key \
+     -f ${config.devenv.root}/.config/sshd_config \
+     -o HostKey=${config.devenv.root}/.config/temp_ed25518_key \
+     -o Port=''${1:-2222}  \
+     -o AuthorizedKeysFile=${config.devenv.root}/.devenv/chroot/home/demo/.ssh/authorized_keys \
+     -o PidFile=${config.devenv.root}/.devenv/sshd.pid   
+  
+  '';
+  
   scripts.draw-graph.exec = ''
   echo -e "Enter Meirmaid graph definition. ''${RED}End with Ctrl+D''${RESET}\n"  
   diagram=$(${pkgs.gum}/bin/gum write --placeholder "Enter Meirmaid graph definition. End with Ctrl+D")
@@ -660,4 +707,5 @@
     printLogfileAndExit 0
   '';
 
+
 }
diff --git a/error.go b/error.go
deleted file mode 100644
index 85dfb4b..0000000
--- a/error.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package jobqueue
-
-import "fmt"
-
-var (
-	ErrCPUPercentage         = fmt.Errorf("could not get CPU percentage")
-	ErrIntervalIsZero        = fmt.Errorf("interval must not be 0")
-	ErrResourceLimitExceeded = fmt.Errorf("resource limit exceeded")
-	ErrNoRunDefined          = fmt.Errorf("no runnable function defined")
-	ErrCycleDetected         = fmt.Errorf("cycle detected")
-	ErrJobAlreadyExists      = fmt.Errorf("job already exists")
-	ErrUnknownDependency     = fmt.Errorf("unknown dependency")
-	ErrMissingDependency     = fmt.Errorf("missing dependency")
-	ErrJobNotFound           = fmt.Errorf("job not found")
-	ErrJobIsDependency       = fmt.Errorf("job is a dependency")
-	ErrNotRunning            = fmt.Errorf("job queue is not running")
-	ErrAlreadyPaused         = fmt.Errorf("job queue is already paused")
-	ErrAlreadyStopped        = fmt.Errorf("job queue is already stopped")
-	ErrNotPaused             = fmt.Errorf("job queue is not paused")
-	ErrAlreadyStarted        = fmt.Errorf("job queue is already started")
-	ErrTimeout               = fmt.Errorf("job timed out")
-	//ErrInitializationFailed  = fmt.Errorf("resource monitoring initialization failed")
-)
diff --git a/errors.go b/errors.go
new file mode 100644
index 0000000..ec304a7
--- /dev/null
+++ b/errors.go
@@ -0,0 +1,32 @@
+package jobqueue
+
+import (
+	"fmt"
+)
+
+var (
+	ErrMissingDependency            = fmt.Errorf("missing dependency")
+	ErrCycleDetected                = fmt.Errorf("cycle detected")
+	ErrQueueEmpty                   = fmt.Errorf("queue is empty")
+	ErrJobAlreadyExists             = fmt.Errorf("job already exists")
+	ErrWorkerNotRunning             = fmt.Errorf("worker is not running")
+	ErrMaxJobsReached               = fmt.Errorf("maximum number of jobs reached")
+	ErrWorkerAlreadyRunning         = fmt.Errorf("worker is already running")
+	ErrWorkerAlreadyAdded           = fmt.Errorf("worker is already added")
+	ErrWorkerNotAdded               = fmt.Errorf("worker is not added")
+	ErrNoWorkers                    = fmt.Errorf("no workers")
+	ErrWorkerAlreadyStopped         = fmt.Errorf("worker is already stopped")
+	ErrManagerAlreadyStopped        = fmt.Errorf("manager is already stopped")
+	ErrManagerAlreadyRunning        = fmt.Errorf("manager is already running")
+	ErrManagerNotRunning            = fmt.Errorf("manager is not running")
+	ErrJobNotScheduled              = fmt.Errorf("job is not scheduled")
+	ErrCronNotInitialized           = fmt.Errorf("cron is not initialized")
+	ErrCPUPercentage                = fmt.Errorf("cpu percentage must be between 0 and 100")
+	ErrIntervalIsZero               = fmt.Errorf("interval must be greater than 0")
+	ErrUnknownRunnableType          = fmt.Errorf("unknown runnable type")
+	ErrUnknownSchedulerType         = fmt.Errorf("unknown scheduler type")
+	ErrUnsupportedDatabaseType      = fmt.Errorf("unsupported database type")
+	ErrUnsupportedFileOption        = fmt.Errorf("unsupported file option")
+	ErrUnsupportedCredentialType    = fmt.Errorf("unsupported credential type")
+	ErrUnsupportedTransferDirection = fmt.Errorf("unsupported transfer direction")
+)
diff --git a/event-bus.go b/event-bus.go
new file mode 100644
index 0000000..23def45
--- /dev/null
+++ b/event-bus.go
@@ -0,0 +1,69 @@
+package jobqueue
+
+import (
+	"sync"
+)
+
+type EventName string
+
+const (
+	JobAdded   EventName = "JobAdded"
+	JobRemoved EventName = "JobRemoved"
+	ExecuteJob EventName = "ExecuteJob"
+	JobReady   EventName = "JobReady"
+	QueueJob   EventName = "QueueJob"
+	// add more events as needed
+)
+
+type Event struct {
+	Name EventName
+	Data any
+}
+
+// EventBus is a simple event bus
+type EventBus struct {
+	subscribers map[EventName][]chan interface{}
+	mu          sync.RWMutex
+}
+
+// NewEventBus creates a new event bus
+func NewEventBus() *EventBus {
+	return &EventBus{
+		subscribers: make(map[EventName][]chan interface{}),
+	}
+}
+
+// Subscribe adds a channel to the subscribers list
+func (eb *EventBus) Subscribe(name EventName, ch chan interface{}) {
+	eb.mu.Lock()
+	defer eb.mu.Unlock()
+	if _, found := eb.subscribers[name]; !found {
+		eb.subscribers[name] = []chan interface{}{}
+	}
+	eb.subscribers[name] = append(eb.subscribers[name], ch)
+}
+
+// Unsubscribe removes a channel from the subscribers list
+func (eb *EventBus) Unsubscribe(name EventName, ch chan interface{}) {
+	eb.mu.Lock()
+	defer eb.mu.Unlock()
+	if channels, found := eb.subscribers[name]; found {
+		for i := range channels {
+			if channels[i] == ch {
+				eb.subscribers[name] = append(channels[:i], channels[i+1:]...)
+				break
+			}
+		}
+	}
+}
+
+// Publish publishes an event to all subscribers
+func (eb *EventBus) Publish(name EventName, data interface{}) {
+	eb.mu.RLock()
+	defer eb.mu.RUnlock()
+	if channels, found := eb.subscribers[name]; found {
+		for _, ch := range channels {
+			ch <- Event{Name: name, Data: data}
+		}
+	}
+}
diff --git a/event-bus_test.go b/event-bus_test.go
new file mode 100644
index 0000000..66671b6
--- /dev/null
+++ b/event-bus_test.go
@@ -0,0 +1,91 @@
+package jobqueue
+
+import (
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestSubscribeAndPublish(t *testing.T) {
+	eb := NewEventBus()
+
+	jobAddedCh := make(chan interface{}, 1)
+	eb.Subscribe(JobAdded, jobAddedCh)
+
+	jobData := "New Job Data"
+	eb.Publish(JobAdded, jobData)
+
+	select {
+	case receivedData := <-jobAddedCh:
+
+		rd := receivedData.(Event)
+
+		if rd.Data != jobData {
+			t.Errorf("Received data %v, want %v", receivedData, jobData)
+		}
+	case <-time.After(1 * time.Second):
+		t.Error("Timed out waiting for published event")
+	}
+}
+
+func TestUnsubscribe(t *testing.T) {
+	eb := NewEventBus()
+
+	jobAddedCh := make(chan interface{}, 1)
+	eb.Subscribe(JobAdded, jobAddedCh)
+	eb.Unsubscribe(JobAdded, jobAddedCh)
+
+	jobData := "New Job Data"
+	eb.Publish(JobAdded, jobData)
+
+	select {
+	case <-jobAddedCh:
+		t.Error("Received data after unsubscribing")
+	case <-time.After(1 * time.Second):
+		// Test passes if it times out (no data received)
+	}
+}
+
+func TestMultipleSubscribers(t *testing.T) {
+	eb := NewEventBus()
+
+	jobAddedCh1 := make(chan interface{}, 1)
+	jobAddedCh2 := make(chan interface{}, 1)
+	eb.Subscribe(JobAdded, jobAddedCh1)
+	eb.Subscribe(JobAdded, jobAddedCh2)
+
+	jobData := "New Job Data"
+	eb.Publish(JobAdded, jobData)
+
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	go func() {
+		defer wg.Done()
+		select {
+		case receivedData := <-jobAddedCh1:
+			rd := receivedData.(Event)
+			if rd.Data != jobData {
+				t.Errorf("Received data %v, want %v", receivedData, jobData)
+			}
+		case <-time.After(1 * time.Second):
+			t.Error("Timed out waiting for published event on channel 1")
+		}
+	}()
+
+	go func() {
+		defer wg.Done()
+		select {
+		case receivedData := <-jobAddedCh2:
+			rd := receivedData.(Event)
+
+			if rd.Data != jobData {
+				t.Errorf("Received data %v, want %v", receivedData, jobData)
+			}
+		case <-time.After(1 * time.Second):
+			t.Error("Timed out waiting for published event on channel 2")
+		}
+	}()
+
+	wg.Wait()
+}
diff --git a/executor.go b/executor.go
deleted file mode 100644
index d9b767e..0000000
--- a/executor.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package jobqueue
-
-import (
-	"context"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-const (
-	MODE_STOPPED = iota
-	MODE_RUNNING
-	MODE_PAUSED
-)
-
-type mode int32
-
-type shouldStopFunc func(executor *jobExecutor) bool
-
-type jobExecutor struct {
-	mutex sync.Mutex
-
-	Queue        JobsInterface
-	Ctx          context.Context
-	CancelFunc   context.CancelFunc
-	MaxParallel  int
-	interval     time.Duration
-	Ticker       *time.Ticker
-	cleanupTimer *time.Ticker
-	//stopChan     chan struct{}
-	sem chan struct{}
-
-	pauseChan  chan struct{}
-	resumeChan chan struct{}
-
-	runningMode int32
-
-	doneChan chan struct{}
-
-	shouldStop shouldStopFunc
-}
-
-func NewJobExecutor(queue JobsInterface, maxParallel int, interval time.Duration, shouldStopFunc shouldStopFunc) *jobExecutor {
-
-	execCtx, cancelFunc := context.WithCancel(context.Background())
-
-	return &jobExecutor{
-		Queue:        queue,
-		Ctx:          execCtx,
-		CancelFunc:   cancelFunc,
-		MaxParallel:  maxParallel,
-		interval:     interval,
-		Ticker:       nil,
-		cleanupTimer: nil,
-		//stopChan:     make(chan struct{}),
-		sem: make(chan struct{}, maxParallel),
-
-		pauseChan:   make(chan struct{}),
-		resumeChan:  make(chan struct{}),
-		runningMode: MODE_STOPPED,
-		shouldStop:  shouldStopFunc,
-	}
-}
-
-func (je *jobExecutor) executeJobs() {
-
-	je.mutex.Lock()
-	je.Ticker = time.NewTicker(je.interval)
-	je.cleanupTimer = time.NewTicker(10 * je.interval)
-	je.setRunningFlag(MODE_RUNNING)
-	je.mutex.Unlock()
-
-	for {
-		select {
-		case <-je.Ticker.C:
-
-			if !je.IsPaused() {
-				je.runJobs()
-				if je.shouldStop != nil && je.shouldStop(je) {
-					_ = je.Stop()
-					return
-				}
-			}
-
-		case <-je.cleanupTimer.C:
-			je.Queue.Cleanup()
-		case <-je.pauseChan:
-			je.setRunningFlag(MODE_PAUSED)
-		case <-je.resumeChan:
-			je.setRunningFlag(MODE_RUNNING)
-		case <-je.Ctx.Done():
-
-			je.mutex.Lock()
-			je.Ticker.Stop()
-			je.cleanupTimer.Stop()
-			je.setRunningFlag(MODE_STOPPED)
-			je.mutex.Unlock()
-
-			return
-		}
-	}
-
-}
-
-func (je *jobExecutor) runJobs() {
-	// Get jobs that can be executed
-	jobs := je.Queue.GetExecutableJobs()
-	if len(jobs) == 0 {
-		return
-	}
-
-	var wg sync.WaitGroup
-
-	// Map to track the status of executed jobs
-	jobStatus := make(map[JobIDType]bool)
-	for _, job := range jobs {
-		jobStatus[job.GetId()] = false
-	}
-
-	// Channel for coordinating job execution
-	jobChan := make(chan ReadOnlyJob)
-
-	// Determine the number of jobs to be sent to the channel
-	for _, job := range jobs {
-		dependencies := job.GetDependencies()
-		canRun := true
-		for _, dependencyID := range dependencies {
-			// Check if dependencies have already been executed
-			if !jobStatus[dependencyID] {
-				canRun = false
-				break
-			}
-		}
-
-		if canRun {
-			wg.Add(1) // Increment the WaitGroup counter for each job to be sent
-		}
-	}
-
-	// Loop through all jobs and execute only if dependencies are met
-	go func() {
-		for _, job := range jobs {
-			dependencies := job.GetDependencies()
-			canRun := true
-			for _, dependencyID := range dependencies {
-				// Check if dependencies have already been executed
-				if !jobStatus[dependencyID] {
-					canRun = false
-					break
-				}
-			}
-
-			if canRun {
-				// Send the job to the job channel
-				jobChan <- job
-			}
-		}
-		close(jobChan) // Close the channel after all jobs have been sent
-	}()
-
-	maxParallel := je.MaxParallel
-	if len(jobs) < maxParallel {
-		maxParallel = len(jobs)
-	}
-
-	// Execute jobs in parallel
-	for i := 0; i < maxParallel; i++ {
-		go func() {
-			for job := range jobChan {
-				job.Run(je.Ctx)
-				// Mark the job as executed
-				jobStatus[job.GetId()] = true
-				wg.Done()
-			}
-		}()
-	}
-
-	// Wait for all jobs to complete before returning
-	wg.Wait()
-}
-
-func (je *jobExecutor) Start() error {
-
-	if je.IsRunning() {
-		return ErrAlreadyStarted
-	}
-
-	if je.IsPaused() {
-		return je.Resume()
-	}
-
-	go je.executeJobs()
-	return nil
-}
-
-func (je *jobExecutor) Stop() error {
-
-	if !je.IsRunning() && !je.IsPaused() {
-		return ErrAlreadyStopped
-	}
-
-	je.CancelFunc()
-	return nil
-}
-
-func (je *jobExecutor) Pause() error {
-
-	if je.IsPaused() {
-		return ErrAlreadyPaused
-	}
-
-	je.mutex.Lock()
-	je.pauseChan <- struct{}{}
-	je.setRunningFlag(MODE_PAUSED)
-	je.mutex.Unlock()
-
-	return nil
-
-}
-
-func (je *jobExecutor) Resume() error {
-
-	if !je.IsPaused() {
-		return ErrNotPaused
-	}
-
-	je.mutex.Lock()
-	je.resumeChan <- struct{}{}
-	je.setRunningFlag(MODE_RUNNING)
-	je.mutex.Unlock()
-
-	return nil
-
-}
-
-func (je *jobExecutor) IsPaused() bool {
-	return atomic.LoadInt32(&je.runningMode) == MODE_PAUSED
-}
-
-func (je *jobExecutor) IsRunning() bool {
-	return atomic.LoadInt32(&je.runningMode) == MODE_RUNNING
-}
-
-func (je *jobExecutor) setRunningFlag(mode int32) {
-	atomic.StoreInt32(&je.runningMode, mode)
-}
diff --git a/executor_test.go b/executor_test.go
deleted file mode 100644
index f38f6aa..0000000
--- a/executor_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package jobqueue
-
-import (
-	"context"
-	"testing"
-	"time"
-)
-
-type TestRunnable struct{}
-
-func (r TestRunnable) Run(ctx context.Context) (int, any, error) {
-	// Dummy run implementation
-	return 0, nil, nil
-}
-
-func TestJobExecutorStartAndStop(t *testing.T) {
-	queue := NewJobs()
-	executor := NewJobExecutor(queue, 1, time.Millisecond*50, nil)
-
-	// Fügen Sie einen Job zur Warteschlange hinzu
-	err := queue.AddJob(JobSpecification{
-		Id:          "test-job",
-		Priority:    1,
-		Concurrency: 1,
-	}, TestRunnable{})
-
-	if err != nil {
-		t.Fatalf("Failed to add job: %v", err)
-	}
-
-	err = executor.Start()
-	if err != nil {
-		t.Errorf("Failed to start executor: %v", err)
-	}
-
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if executor.IsRunning() {
-			break
-		}
-	}
-
-	if !executor.IsRunning() {
-		t.Errorf("Executor should be running")
-	}
-
-	err = executor.Stop()
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if !executor.IsRunning() {
-			break
-		}
-	}
-
-	if err != nil {
-		t.Errorf("Failed to stop executor: %v", err)
-	}
-
-	if executor.IsRunning() {
-		t.Errorf("Executor should not be running")
-	}
-}
-
-func TestJobExecutorPauseAndResume(t *testing.T) {
-	queue := NewJobs()
-	executor := NewJobExecutor(queue, 1, time.Millisecond*50, nil)
-
-	// Fügen Sie einen Job zur Warteschlange hinzu
-	err := queue.AddJob(JobSpecification{
-		Id:          "test-job",
-		Priority:    1,
-		Concurrency: 1,
-	}, TestRunnable{})
-	if err != nil {
-		t.Fatalf("Failed to add job: %v", err)
-	}
-
-	err = executor.Start()
-	if err != nil {
-		t.Errorf("Failed to start executor: %v", err)
-	}
-
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if executor.IsRunning() {
-			break
-		}
-	}
-
-	err = executor.Pause()
-	if err != nil {
-		t.Errorf("Failed to pause executor: %v", err)
-	}
-
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if executor.IsPaused() {
-			break
-		}
-	}
-
-	if !executor.IsPaused() {
-		t.Errorf("Executor should be paused")
-	}
-
-	err = executor.Resume()
-	if err != nil {
-		t.Errorf("Failed to resume executor: %v", err)
-	}
-
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if executor.IsRunning() {
-			break
-		}
-	}
-
-	if executor.IsPaused() {
-		t.Errorf("Executor should not be paused")
-	}
-
-	err = executor.Stop()
-	if err != nil {
-		t.Errorf("Failed to stop executor: %v", err)
-	}
-
-	for i := 0; i < 10; i++ {
-		time.Sleep(time.Millisecond * 100)
-		if !executor.IsRunning() {
-			break
-		}
-	}
-
-	if executor.IsRunning() {
-		t.Errorf("Executor should not be running")
-	}
-}
diff --git a/go.mod b/go.mod
index adcd800..4f9576d 100644
--- a/go.mod
+++ b/go.mod
@@ -3,19 +3,47 @@ module gitlab.schukai.com/oss/libraries/go/services/job-queues.git
 go 1.20
 
 require (
+	github.com/docker/docker v24.0.6+incompatible
+	github.com/pkg/sftp v1.13.6
+	github.com/robfig/cron/v3 v3.0.1
+	github.com/shirou/gopsutil/v3 v3.23.9
+	github.com/stretchr/testify v1.8.4
+	golang.org/x/crypto v0.14.0
+	gopkg.in/yaml.v3 v3.0.1
+	gorm.io/driver/mysql v1.5.2
+	gorm.io/gorm v1.25.5
+)
+
+require (
+	github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
-	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/distribution/reference v0.5.0 // indirect
+	github.com/docker/distribution v2.8.3+incompatible // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
+	github.com/moby/term v0.5.0 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
-	github.com/robfig/cron/v3 v3.0.1 // indirect
-	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
-	github.com/shirou/gopsutil/v3 v3.23.9 // indirect
+	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/stretchr/testify v1.8.4 // indirect
 	github.com/tklauser/go-sysconf v0.3.12 // indirect
 	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/yusufpapurcu/wmi v1.2.3 // indirect
-	golang.org/x/sys v0.12.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	golang.org/x/mod v0.8.0 // indirect
+	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/tools v0.6.0 // indirect
+	gotest.tools/v3 v3.5.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 9bed6c8..8181d58 100644
--- a/go.sum
+++ b/go.sum
@@ -1,27 +1,68 @@
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
+github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0=
+github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
-github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
-github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E=
 github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA=
 github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
 github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
 github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -34,16 +75,82 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
 github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
 github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
 github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
 github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs=
+gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8=
+gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
+gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
diff --git a/gob.go b/gob.go
deleted file mode 100644
index ec53366..0000000
--- a/gob.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package jobqueue
-
-import (
-	"bytes"
-	"encoding/gob"
-	"sync"
-)
-
-// SerializeJob serializes JobData and JobSpecification into a byte slice
-func (j *job) SerializeJob() ([]byte, error) {
-	var buf bytes.Buffer
-	enc := gob.NewEncoder(&buf)
-
-	if err := enc.Encode(j.JobData); err != nil {
-		return nil, err
-	}
-	if err := enc.Encode(j.JobSpecification); err != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), nil
-}
-
-// DeserializeJob deserializes a byte slice into a job struct
-func DeserializeJob(data []byte) (*job, error) {
-	var jobData JobData
-	var jobSpec JobSpecification
-
-	buf := bytes.NewBuffer(data)
-	dec := gob.NewDecoder(buf)
-
-	if err := dec.Decode(&jobData); err != nil {
-		return nil, err
-	}
-	if err := dec.Decode(&jobSpec); err != nil {
-		return nil, err
-	}
-
-	job := &job{
-		JobData:          jobData,
-		JobSpecification: jobSpec,
-		mu:               sync.Mutex{},
-		//ctx:              context.Background(),
-	}
-
-	return job, nil
-}
diff --git a/gob_test.go b/gob_test.go
deleted file mode 100644
index 170ec9a..0000000
--- a/gob_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package jobqueue
-
-import (
-	"github.com/stretchr/testify/assert"
-	"testing"
-)
-
-func TestSerializationAndDeserialization(t *testing.T) {
-	// Create a job instance and populate its fields
-	originalJob := &job{
-		JobSpecification: JobSpecification{
-			Id:       "testJob",
-			Priority: 1,
-		},
-	}
-
-	// Serialize the job to a byte slice
-	data, err := originalJob.SerializeJob()
-	assert.Nil(t, err)
-
-	// Deserialize the byte slice back to a job struct
-	deserializedJob, err := DeserializeJob(data)
-	assert.Nil(t, err)
-
-	// Compare the original and deserialized jobs
-	assert.Equal(t, originalJob.Id, deserializedJob.Id)
-	assert.Equal(t, originalJob.Priority, deserializedJob.Priority)
-
-}
diff --git a/import.go b/import.go
new file mode 100644
index 0000000..371f122
--- /dev/null
+++ b/import.go
@@ -0,0 +1,177 @@
+package jobqueue
+
+import (
+	"gopkg.in/yaml.v3"
+	"os"
+	"time"
+)
+
+type JobImport struct {
+	ID           string          `yaml:"id" json:"id"`
+	Priority     int             `yaml:"priority" json:"priority"`
+	Timeout      time.Duration   `yaml:"timeout" json:"timeout"`
+	MaxRetries   uint            `yaml:"maxRetries" json:"maxRetries"`
+	RetryDelay   time.Duration   `yaml:"retryDelay" json:"retryDelay"`
+	Dependencies []string        `yaml:"dependencies" json:"dependencies,omitempty"`
+	Runnable     RunnableImport  `yaml:"runnable" json:"runnable"`
+	Scheduler    SchedulerImport `yaml:"scheduler" json:"scheduler,omitempty"`
+}
+
+type RunnableImport struct {
+	Type string         `yaml:"type" json:"type"`
+	Data map[string]any `yaml:"data,omitempty" json:"data,omitempty"`
+}
+
+type SchedulerImport struct {
+	Type     string        `yaml:"type" json:"type"`
+	Interval time.Duration `yaml:"interval,omitempty" json:"interval,omitempty"`
+	Spec     string        `yaml:"spec,omitempty" json:"spec,omitempty"`
+	Delay    time.Duration `yaml:"delay,omitempty" json:"delay,omitempty"`
+	Event    string        `yaml:"event,omitempty" json:"event,omitempty"`
+}
+
+func ReadYAMLFile(filePath string) ([]JobImport, error) {
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	var jobs []JobImport
+	err = yaml.Unmarshal(data, &jobs)
+	if err != nil {
+		return nil, err
+	}
+
+	return jobs, nil
+}
+
+func ReadJsonFile(filePath string) ([]JobImport, error) {
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	var jobs []JobImport
+	err = yaml.Unmarshal(data, &jobs)
+	if err != nil {
+		return nil, err
+	}
+
+	return jobs, nil
+}
+
+func CreateJobAndSchedulerFromImport(jobImport JobImport) (GenericJob, Scheduler, error) {
+
+	var job GenericJob
+
+	switch jobImport.Runnable.Type {
+	case "Shell":
+
+		runner := &ShellRunnable{
+			ScriptPath: jobImport.Runnable.Data["ScriptPath"].(string),
+		}
+
+		job = GenericJob(&Job[ShellResult]{
+			id:         JobID(jobImport.ID),
+			priority:   Priority(jobImport.Priority),
+			timout:     jobImport.Timeout,
+			maxRetries: jobImport.MaxRetries,
+			RetryDelay: jobImport.RetryDelay,
+			runner:     runner,
+		})
+
+	case "Counter":
+		runner := &CounterRunnable{
+			Count: jobImport.Runnable.Data["Count"].(int),
+		}
+		job = GenericJob(&Job[CounterResult]{
+			id:         JobID(jobImport.ID),
+			priority:   Priority(jobImport.Priority),
+			timout:     jobImport.Timeout,
+			maxRetries: jobImport.MaxRetries,
+			RetryDelay: jobImport.RetryDelay,
+			runner:     runner,
+		})
+
+	case "HTTP":
+		runner := &HTTPRunnable{
+			URL: jobImport.Runnable.Data["URL"].(string),
+		}
+		job = GenericJob(&Job[HTTPResult]{id: JobID(jobImport.ID),
+			priority:   Priority(jobImport.Priority),
+			timout:     jobImport.Timeout,
+			maxRetries: jobImport.MaxRetries,
+			RetryDelay: jobImport.RetryDelay,
+			runner:     runner,
+		})
+
+	case "DB":
+		runner := &DBRunnable{
+			Query: jobImport.Runnable.Data["Query"].(string),
+		}
+		job = GenericJob(&Job[DBResult]{id: JobID(jobImport.ID),
+			priority:   Priority(jobImport.Priority),
+			timout:     jobImport.Timeout,
+			maxRetries: jobImport.MaxRetries,
+			RetryDelay: jobImport.RetryDelay,
+			runner:     runner,
+		})
+
+	default:
+		return nil, nil, ErrUnknownRunnableType
+	}
+
+	var scheduler Scheduler
+	switch jobImport.Scheduler.Type {
+	case "Interval":
+		scheduler = &IntervalScheduler{Interval: jobImport.Scheduler.Interval}
+
+	case "Cron":
+		scheduler = &CronScheduler{Spec: jobImport.Scheduler.Spec}
+
+	case "Delay":
+		scheduler = &DelayScheduler{Delay: jobImport.Scheduler.Delay}
+
+	case "Event":
+		scheduler = &EventScheduler{Event: EventName(jobImport.Scheduler.Event)}
+
+	default:
+		return nil, nil, ErrUnknownSchedulerType
+	}
+
+	return job, scheduler, nil
+}
+
+func LoadJobsAndSchedule(filePath string, manager *Manager) error {
+
+	var err error
+	var imp []JobImport
+
+	switch filePath[len(filePath)-4:] {
+	case "yaml":
+		imp, err = ReadYAMLFile(filePath)
+		break
+	case "json":
+		imp, err = ReadJsonFile(filePath)
+		break
+
+	}
+
+	if err != nil {
+		return err
+	}
+
+	for _, imp := range imp {
+		job, scheduler, err := CreateJobAndSchedulerFromImport(imp)
+		if err != nil {
+			return err
+		}
+
+		err = manager.ScheduleJob(job, scheduler)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/import_test.go b/import_test.go
new file mode 100644
index 0000000..056ccef
--- /dev/null
+++ b/import_test.go
@@ -0,0 +1,124 @@
+package jobqueue
+
+import (
+	"github.com/stretchr/testify/assert"
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+)
+
+func TestCreateJobAndSchedulerFromInput(t *testing.T) {
+	tests := []struct {
+		name      string
+		input     JobImport
+		wantJob   GenericJob
+		wantSched Scheduler
+		wantErr   bool
+	}{
+		{
+			name: "Shell Runnable and Interval Scheduler",
+			input: JobImport{
+				ID:         "1",
+				Priority:   1,
+				Timeout:    10 * time.Second,
+				MaxRetries: 3,
+				RetryDelay: 2 * time.Second,
+				Runnable: RunnableImport{
+					Type: "Shell",
+					Data: map[string]any{"ScriptPath": "script.sh"},
+				},
+				Scheduler: SchedulerImport{
+					Type:     "Interval",
+					Interval: 1 * time.Minute,
+				},
+			},
+			wantJob:   GenericJob(&Job[ShellResult]{ /* Initialization */ }),
+			wantSched: &IntervalScheduler{Interval: 1 * time.Minute},
+			wantErr:   false,
+		},
+		{
+			name: "Shell Runnable and Cron Scheduler",
+			input: JobImport{
+				ID:       "1",
+				Priority: 1,
+				Runnable: RunnableImport{
+					Type: "Shell",
+					Data: map[string]any{"ScriptPath": "script.sh"},
+				},
+				Scheduler: SchedulerImport{
+					Type: "Cron",
+					Spec: "*/5 * * * *",
+				},
+			},
+			wantJob:   GenericJob(&Job[ShellResult]{ /* Initialization */ }),
+			wantSched: &CronScheduler{ /* Initialization */ },
+			wantErr:   false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotJob, gotSchedule, err := CreateJobAndSchedulerFromImport(tt.input)
+
+			if (err != nil) != tt.wantErr {
+				t.Errorf("CreateJobAndSchedulerFromImport() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			assert.Equal(t, JobID(tt.input.ID), gotJob.GetID(), "Job ID mismatch")
+			assert.Equal(t, Priority(tt.input.Priority), gotJob.GetPriority(), "Job Priority mismatch")
+
+			assert.Equal(t, tt.input.Scheduler.Type, gotSchedule.GetType(), "Scheduler Type mismatch")
+
+		})
+	}
+}
+
+func TestReadJsonFile(t *testing.T) {
+	testContent := `[{"id": "1", "priority": 1}]`
+	tempFile, err := ioutil.TempFile("", "test.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(tempFile.Name())
+
+	if _, err := tempFile.Write([]byte(testContent)); err != nil {
+		t.Fatal(err)
+	}
+	tempFile.Close()
+
+	jobs, err := ReadJsonFile(tempFile.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(jobs) != 1 || jobs[0].ID != "1" || jobs[0].Priority != 1 {
+		t.Errorf("Expected job with ID '1' and priority 1, got %+v", jobs)
+	}
+}
+
+func TestReadYAMLFile(t *testing.T) {
+	testContent := `- id: "1"
+  priority: 1
+`
+	tempFile, err := ioutil.TempFile("", "test.yaml")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(tempFile.Name())
+
+	if _, err := tempFile.Write([]byte(testContent)); err != nil {
+		t.Fatal(err)
+	}
+	tempFile.Close()
+
+	jobs, err := ReadYAMLFile(tempFile.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(jobs) != 1 || jobs[0].ID != "1" || jobs[0].Priority != 1 {
+		t.Errorf("Expected job with ID '1' and priority 1, got %+v", jobs)
+	}
+}
diff --git a/issue-1_test.go b/issue-1_test.go
deleted file mode 100644
index 9669fbc..0000000
--- a/issue-1_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package jobqueue
-
-import (
-	"errors"
-	"testing"
-)
-
-// TestJobQueueWithDependencies
-// Test if jobs are sorted by priority and dependencies
-//
-// graph TD
-//
-//	A[Job1-PriorityDefault]
-//	B[Job2-PriorityHigh]
-//	C[Job3-PriorityLow]
-//	D[Job4-PriorityCritical]
-//
-//	A --> B
-//	A --> C
-//	C --> D
-func TestRunJobQueueWithDependencies(t *testing.T) {
-	jq := NewJobs()
-	if jq == nil {
-		t.Errorf("NewJobs returned nil")
-	}
-
-	// create new jobs
-	job1 := newJob(JobSpecification{Id: "1"}) // default priority is PriorityDefault
-	job2 := newJob(JobSpecification{Id: "2"})
-	job2.JobSpecification.Priority = PriorityHigh
-	job3 := newJob(JobSpecification{Id: "3"})
-	job3.JobSpecification.Priority = PriorityLow
-	job4 := newJob(JobSpecification{Id: "4"})
-	job4.JobSpecification.Priority = PriorityCritical
-
-	job3.JobSpecification.Dependencies = []JobIDType{"1"}
-	job4.JobSpecification.Dependencies = []JobIDType{"3"}
-	job2.JobSpecification.Dependencies = []JobIDType{"1"}
-
-	_ = job1
-
-}
-
-func TestIssue1NewJobQueue(t *testing.T) {
-	jq := NewJobs()
-	if jq == nil {
-		t.Errorf("NewJobs returned nil")
-	}
-
-	// create new jobs
-	job1 := JobSpecification{Id: "1"} // default priority is PriorityDefault
-	job2 := JobSpecification{Id: "2", Priority: PriorityHigh}
-	job3 := JobSpecification{Id: "3", Priority: PriorityLow}
-	job4 := JobSpecification{Id: "4", Priority: PriorityCritical}
-
-	// add jobs to jobs
-	if err := jq.AddJob(job1, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	if err := jq.AddJob(job1, nil); err == nil || !errors.Is(err, ErrJobAlreadyExists) {
-		t.Errorf("Expected ErrJobAlreadyExists, got %v", err)
-	}
-
-	if err := jq.AddJob(job2, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	// add job3 and job4 to jobs
-	if err := jq.AddJob(job3, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	if err := jq.AddJob(job4, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	// check if jobs are in jobs
-	if len(jq.jobs) != 4 {
-		t.Errorf("Failed to add all jobs to jobs")
-	}
-
-}
-
-// TestJobQueueWithDependencies
-// Test if jobs are sorted by priority and dependencies
-//
-// graph TD
-//
-//	A[Job1-PriorityDefault]
-//	B[Job2-PriorityHigh]
-//	C[Job3-PriorityLow]
-//	D[Job4-PriorityCritical]
-//
-//	A --> B
-//	A --> C
-//	C --> D
-func TestJobQueueWithDependencies(t *testing.T) {
-	jq := NewJobs()
-	if jq == nil {
-		t.Errorf("NewJobs returned nil")
-	}
-
-	job1 := JobSpecification{Id: "1"} // default priority is PriorityDefault
-	job2 := JobSpecification{Id: "2", Priority: PriorityHigh, Dependencies: []JobIDType{"1"}}
-	job3 := JobSpecification{Id: "3", Priority: PriorityLow, Dependencies: []JobIDType{"1"}}
-	job4 := JobSpecification{Id: "4", Priority: PriorityCritical, Dependencies: []JobIDType{"3"}}
-
-	// set dependencies
-	// job1 depends on nothing
-	// job2 depends on job1
-	// job3 depends on job1
-	// job4 depends on job3
-	// add jobs to jobs
-	if err := jq.AddJob(job1, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	if err := jq.AddJob(job2, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	if err := jq.AddJob(job3, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-	if err := jq.AddJob(job4, nil); err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	// check if jobs are in jobs
-	if len(jq.jobs) != 4 {
-		t.Errorf("Failed to add all jobs to jobs")
-	}
-
-}
diff --git a/job-log.go b/job-log.go
index a0be1c7..171039e 100644
--- a/job-log.go
+++ b/job-log.go
@@ -20,7 +20,7 @@ type JobLog struct {
 		Disk    int64 `json:"disk"`
 		Network int64 `json:"network"`
 	} `json:"io"`
-	ErrorMsg     string            `json:"error_msg"`
-	IsSuccessful bool              `json:"is_successful"`
-	Metadata     map[string]string `json:"metadata"`
+	ErrorMsg     string `json:"error_msg"`
+	IsSuccessful bool   `json:"is_successful"`
+	//Metadata     map[string]string `json:"metadata"`
 }
diff --git a/job-run.go b/job-run.go
deleted file mode 100644
index c468404..0000000
--- a/job-run.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package jobqueue
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"time"
-)
-
-func (j *job) handleFailover() error {
-	if j.failover != nil {
-		return j.failover()
-	}
-	return nil
-}
-
-func (j *job) Run(ctx context.Context) {
-
-	var err error
-
-	defer func() {
-		if j.Status == JobRunning {
-			j.Status = JobPending
-		}
-	}()
-
-	j.mu.Lock()
-	defer j.mu.Unlock()
-
-	// Check for resource limits
-	if j.exceedsResourceLimits() {
-		j.updateStats(0, ErrResourceLimitExceeded, 0)
-		return
-	}
-
-	// Execute fail-over logic if specified
-	err = j.handleFailover()
-	if err != nil {
-		j.updateStats(0, err, 0)
-		return
-	}
-
-	// Add timeout if specified
-	if j.Timeout > 0 {
-		var cancelFunc context.CancelFunc
-		ctx, cancelFunc = context.WithTimeout(ctx, j.Timeout)
-		defer cancelFunc()
-	}
-
-	maxRetries := j.Retries
-	if maxRetries == 0 {
-		maxRetries = 1
-	}
-
-	// Run job
-	err = nil
-	for i := 0; i < maxRetries; i++ {
-		err = j.singleRun(ctx)
-		if err == nil {
-			break
-		}
-
-		if j.RetryDelay > 0 {
-			time.Sleep(j.RetryDelay)
-		}
-
-	}
-
-	// Update job status
-	j.LastRun = time.Now()
-
-	// calculate next run
-	if j.scheduleImpl != nil {
-		j.NextRun = j.scheduleImpl.Next(j.LastRun)
-	}
-
-	if err != nil {
-		j.Status = JobFailed
-		return
-	}
-
-	if j.MaxRuns > 0 && j.Stats.RunCount >= j.MaxRuns {
-		j.Status = JobFinished
-		return
-	}
-
-	j.Status = JobPending
-
-}
-
-func (j *job) singleRun(ctx context.Context) (err error) {
-	startTime := time.Now()
-
-	defer func() {
-		if r := recover(); r != nil {
-			err = fmt.Errorf("job runnable resulted in panic: %v", r)
-			j.Status = JobFailed
-		}
-
-	}()
-
-	if j.runnable == nil {
-		return ErrNoRunDefined
-	}
-
-	j.Status = JobRunning
-	exitCode, result, err := j.runnable.Run(ctx)
-
-	// Log and stats update
-	logEntry := JobLog{
-		ProcessID:    os.Getpid(),
-		StartTime:    startTime,
-		EndTime:      time.Now(),
-		ExitCode:     exitCode,
-		ErrorMsg:     "",
-		Result:       result,
-		IsSuccessful: true,
-		Metadata:     nil,
-	}
-
-	if exitCode != 0 {
-		logEntry.IsSuccessful = false
-		logEntry.ExitCode = exitCode
-	}
-
-	if err != nil {
-		logEntry.ErrorMsg = err.Error()
-		logEntry.IsSuccessful = false
-	}
-
-	j.addLogEntry(logEntry)
-	j.updateStats(exitCode, err, time.Since(startTime))
-
-	return
-}
-
-func (j *job) addLogEntry(logEntry JobLog) {
-	j.Logs = append(j.Logs, logEntry)
-
-	for _, hook := range j.telemetryHooks {
-		go hook(&logEntry)
-	}
-
-	if j.MaxLogEntries == 0 {
-		return
-	}
-
-	// Überprüfen, ob die maximale Anzahl von Log-Einträgen überschritten wurde
-	if len(j.Logs) > j.MaxLogEntries {
-		// Log-Einträge rotieren und an das Logger-Objekt senden
-		for i := 0; i < len(j.Logs)-j.MaxLogEntries; i++ {
-			if j.Logger != nil {
-				_, err := (*j.Logger).Write(j.Logs[i])
-				if err != nil {
-					continue
-				}
-			}
-		}
-
-		j.Logs = j.Logs[len(j.Logs)-j.MaxLogEntries:]
-	}
-}
-
-func (j *job) exceedsResourceLimits() bool {
-
-	currentCPU := GetCpuUsage()
-	currentMemory := GetMemoryUsage()
-
-	if j.ResourceLimits.CPULimit != 0 && currentCPU > j.ResourceLimits.CPULimit {
-		return true
-	}
-	if j.ResourceLimits.MemoryLimit != 0 && currentMemory > j.ResourceLimits.MemoryLimit {
-		return true
-	}
-	return false
-}
-
-func (j *job) updateStats(exitCode int, err error, duration time.Duration) {
-	j.Stats.RunCount++
-	if err == nil {
-		j.Stats.SuccessCount++
-	} else {
-		j.Stats.ErrorCount++
-		j.Stats.LastErrorCode = exitCode
-	}
-
-	// Aktualisieren der Zeitmetriken
-	j.Stats.TimeMetrics.TotalRunTime += duration
-	if j.Stats.RunCount == 1 {
-		j.Stats.TimeMetrics.MinRunTime = duration
-		j.Stats.TimeMetrics.MaxRunTime = duration
-	} else {
-		if duration < j.Stats.TimeMetrics.MinRunTime {
-			j.Stats.TimeMetrics.MinRunTime = duration
-		}
-		if duration > j.Stats.TimeMetrics.MaxRunTime {
-			j.Stats.TimeMetrics.MaxRunTime = duration
-		}
-	}
-
-	j.Stats.TimeMetrics.AvgRunTime = j.Stats.TimeMetrics.TotalRunTime / time.Duration(j.Stats.RunCount)
-}
diff --git a/job-stat.go b/job-stat.go
index 136ff99..68599e0 100644
--- a/job-stat.go
+++ b/job-stat.go
@@ -14,27 +14,4 @@ type JobStats struct {
 		MinRunTime   time.Duration `json:"min"`
 		TotalRunTime time.Duration `json:"total"`
 	} `json:"time_metrics"`
-	LastErrorCode     int `json:"last_error_code"`
-	LastSuccessCode   int `json:"last_success_code"`
-	PriorityEscalates int `json:"priority_escalates"`
-	ResourceUsage     struct {
-		CPU struct {
-			Avg    float64 `json:"avg"`
-			StdDev float64 `json:"std_dev"`
-		} `json:"cpu"`
-		Memory struct {
-			Avg    int `json:"avg"`
-			StdDev int `json:"std_dev"`
-		} `json:"memory"`
-		IO struct {
-			Disk struct {
-				Avg    int64 `json:"avg"`
-				StdDev int64 `json:"std_dev"`
-			} `json:"disk"`
-			Network struct {
-				Avg    int64 `json:"avg"`
-				StdDev int64 `json:"std_dev"`
-			} `json:"network"`
-		} `json:"io"`
-	} `json:"resource_usage"`
 }
diff --git a/job-status.go b/job-status.go
deleted file mode 100644
index 03756fe..0000000
--- a/job-status.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package jobqueue
-
-// JobStatus is the status of a job
-type JobStatus int
-
-const (
-	JobPending JobStatus = iota
-	JobScheduled
-	JobRunning
-	JobFailed
-	JobFinished
-)
-
-// String returns the string representation of a JobStatus
-func (js JobStatus) String() string {
-	return [...]string{"Pending", "Scheduled", "Running", "Failed", "Finished"}[js]
-}
diff --git a/job.go b/job.go
index d380aaf..1dcdb80 100644
--- a/job.go
+++ b/job.go
@@ -2,191 +2,245 @@ package jobqueue
 
 import (
 	"context"
-	"github.com/robfig/cron/v3"
+	"os"
 	"sync"
 	"time"
 )
 
-type JobIDType string
+type JobID string
 
-func (j JobIDType) String() string {
-	return string(j)
+func (id JobID) String() string {
+	return string(id)
 }
 
-func newJob(spec JobSpecification) *job {
-	j := &job{
-		JobSpecification: spec,
-		mu:               sync.Mutex{},
-	}
+type Priority int
 
-	if spec.Schedule != "" {
-		schedule, err := cron.ParseStandard(spec.Schedule)
-		if err != nil {
-			panic(err)
-		}
+const (
+	PriorityLow Priority = iota
+	PriorityDefault
+	PriorityHigh
+	PriorityCritical
+)
 
-		j.scheduleImpl = schedule
-	}
+type GenericJob interface {
+	GetID() JobID
+	GetDependencies() []JobID
 
-	if spec.Priority == 0 {
-		j.Priority = PriorityDefault
-	}
+	GetPriority() Priority
 
-	return j
+	Execute(ctx context.Context) (RunGenericResult, error)
+
+	Cancel() error
+
+	GetMaxRetries() uint
+
+	GetRetryDelay() time.Duration
+
+	GetTimeout() time.Duration
 }
 
-type ResourceLimits struct {
-	CPULimit    float64 `json:"cpu_limit,omitempty"`
-	MemoryLimit uint64  `json:"memory_limit,omitempty"`
+type Job[T any] struct {
+	id       JobID
+	priority Priority
+
+	timout     time.Duration
+	maxRetries uint
+	RetryDelay time.Duration
+
+	dependencies []JobID
+
+	mu sync.Mutex
+
+	runner Runnable[T]
+
+	stats JobStats
+	logs  []JobLog
 }
 
-type JobSpecification struct {
-	Id             JobIDType              `json:"id,omitempty"`
-	Priority       int                    `json:"priority,omitempty"`
-	MaxRuns        int                    `json:"max_runs,omitempty"`
-	Concurrency    int                    `json:"concurrency,omitempty"`
-	Schedule       string                 `json:"schedule,omitempty"`
-	Timeout        time.Duration          `json:"timeout,omitempty"`
-	Retries        int                    `json:"retries,omitempty"`
-	RetryDelay     time.Duration          `json:"retry_delay,omitempty"`
-	ResourceLimits ResourceLimits         `json:"resource_limits"`
-	Dependencies   []JobIDType            `json:"dependencies,omitempty"`
-	Tags           []string               `json:"tags,omitempty"`
-	Metadata       map[string]interface{} `json:"metadata,omitempty"`
+// NewJob creates a new job with the given id and runner
+func NewJob[T any](id JobID, runner Runnable[T]) *Job[T] {
+	return &Job[T]{
+		id:       id,
+		runner:   runner,
+		priority: PriorityDefault,
+	}
 }
 
-type JobData struct {
-	Status JobStatus `json:"status,omitempty"`
+// Execute executes the job
+func (j *Job[T]) Execute(ctx context.Context) (RunGenericResult, error) {
+	startTime := time.Now()
+	r, runnerError := j.runner.Run()
+	endTime := time.Now()
+	elapsedTime := endTime.Sub(startTime)
 
-	LastRun time.Time `json:"last_run"`
-	NextRun time.Time `json:"next_run"`
+	j.mu.Lock()
+	defer j.mu.Unlock()
 
-	Stats JobStats `json:"stats"`
-	Logs  []JobLog `json:"logs,omitempty"`
+	// Update RunCount
+	j.stats.RunCount++
 
-	Logger        *Logger
-	MaxLogEntries int
-}
+	// Update TimeMetrics
+	j.stats.TimeMetrics.TotalRunTime += elapsedTime
+	if j.stats.TimeMetrics.MinRunTime == 0 || elapsedTime < j.stats.TimeMetrics.MinRunTime {
+		j.stats.TimeMetrics.MinRunTime = elapsedTime
+	}
+	if elapsedTime > j.stats.TimeMetrics.MaxRunTime {
+		j.stats.TimeMetrics.MaxRunTime = elapsedTime
+	}
+	j.stats.TimeMetrics.AvgRunTime = j.stats.TimeMetrics.TotalRunTime / time.Duration(j.stats.RunCount)
 
-// job contains both serializable data and functional fields
-type job struct {
-	JobData          `json:"data,omitempty"`
-	JobSpecification `json:"spec,omitempty"`
+	// Update SuccessCount or ErrorCount and codes
+	if runnerError == nil {
+		j.stats.SuccessCount++
+	} else {
+		j.stats.ErrorCount++
+	}
 
-	runnable       Runnable
-	scheduleImpl   cron.Schedule
-	telemetryHooks []func(*JobLog)
-	failover       func() error
-	mu             sync.Mutex
-}
+	newLog := JobLog{
+		StartTime: startTime,
+	}
 
-type ReadOnlyJob interface {
-	GetId() JobIDType
-	GetPriority() int
-	//GetExclusive() bool
-	GetMaxRuns() int
-	GetConcurrency() int
-	GetLastRun() time.Time
-	GetNextRun() time.Time
-	GetRunnable() Runnable
-	GetSchedule() cron.Schedule
-	GetDependencies() []JobIDType
-	GetDependents() []JobIDType
-	GetRuns() int
+	if runnerError == nil {
+		newLog.IsSuccessful = true
+		newLog.ExitCode = 0
+	} else {
+		newLog.IsSuccessful = false
+		newLog.ExitCode = 1 // generic non-zero failure code; runner errors carry no specific exit code here
+		newLog.ErrorMsg = runnerError.Error()
+	}
+
+	newLog.StartTime = startTime
+	newLog.EndTime = endTime
+	newLog.ProcessID = os.Getpid() // PID of the current process that executed the runner
+
+	newLog.EndTime = time.Now()
+	if runnerError != nil {
+		newLog.ErrorMsg = runnerError.Error()
+	}
 
-	GetLogs() []JobLog
+	j.logs = append(j.logs, newLog)
 
-	GetStats() JobStats
+	genericResult := RunGenericResult(r)
+	return genericResult, runnerError
+}
 
-	Run(context.Context)
+// Cancel cancels the job
+func (j *Job[T]) Cancel() error {
+	return nil
 }
 
-func (j *job) GetStats() JobStats {
+// SetPriority sets the priority of the job
+func (j *Job[T]) SetPriority(priority Priority) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.Stats
+	j.priority = priority
+	return j
 }
 
-func (j *job) GetLogs() []JobLog {
+func (j *Job[T]) GetPriority() Priority {
 	j.mu.Lock()
 	defer j.mu.Unlock()
-	return j.Logs
+
+	return j.priority
 }
 
-func (j *job) GetId() JobIDType {
+// SetTimeout sets the timeout of the job
+func (j *Job[T]) SetTimeout(timeout time.Duration) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
-	return j.Id
+
+	j.timout = timeout
+	return j
 }
 
-func (j *job) GetPriority() int {
+func (j *Job[T]) GetTimeout() time.Duration {
 	j.mu.Lock()
 	defer j.mu.Unlock()
-	return j.Priority
+
+	return j.timout
 }
 
-func (j *job) GetMaxRuns() int {
+// SetMaxRetries sets the max retries of the job
+func (j *Job[T]) SetMaxRetries(maxRetries uint) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.MaxRuns
+	j.maxRetries = maxRetries
+	return j
 }
 
-func (j *job) GetConcurrency() int {
+func (j *Job[T]) GetMaxRetries() uint {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.Concurrency
+	return j.maxRetries
 }
 
-func (j *job) GetLastRun() time.Time {
+// SetRetryDelay sets the retry delay of the job
+func (j *Job[T]) SetRetryDelay(retryDelay time.Duration) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.LastRun
+	j.RetryDelay = retryDelay
+	return j
 }
 
-func (j *job) GetNextRun() time.Time {
+func (j *Job[T]) GetRetryDelay() time.Duration {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.NextRun
+	return j.RetryDelay
 }
 
-func (j *job) GetRunnable() Runnable {
+// SetDependencies sets the dependencies of the job
+func (j *Job[T]) SetDependencies(dependencies []JobID) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.runnable
+	j.dependencies = dependencies
+	return j
 }
 
-func (j *job) GetSchedule() cron.Schedule {
+func (j *Job[T]) AddDependency(dependency JobID) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.scheduleImpl
+	j.dependencies = append(j.dependencies, dependency)
+	return j
 }
 
-func (j *job) GetDependencies() []JobIDType {
+func (j *Job[T]) RemoveDependency(dependency JobID) *Job[T] {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.Dependencies
+	for i, dep := range j.dependencies {
+		if dep == dependency {
+			j.dependencies = append(j.dependencies[:i], j.dependencies[i+1:]...)
+			break
+		}
+	}
+	return j
 }
 
-func (j *job) GetDependents() []JobIDType {
-
+func (j *Job[T]) GetDependencies() []JobID {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.Dependencies
+	return j.dependencies
 }
 
-func (j *job) GetRuns() int {
-
+// GetID returns the id of the job
+func (j *Job[T]) GetID() JobID {
 	j.mu.Lock()
 	defer j.mu.Unlock()
 
-	return j.Stats.RunCount
+	return j.id
+}
+
+// GetRunnable returns the runnable of the job
+func (j *Job[T]) GetRunnable() Runnable[T] {
+	j.mu.Lock()
+	defer j.mu.Unlock()
+	return j.runner
 }
diff --git a/job_test.go b/job_test.go
index 597cbe0..3400cef 100644
--- a/job_test.go
+++ b/job_test.go
@@ -2,163 +2,143 @@ package jobqueue
 
 import (
 	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
 	"github.com/stretchr/testify/assert"
+	"os"
+	"path"
 	"testing"
 	"time"
 )
 
-type mockRunnable struct {
-	shouldFail int
-	sleep      time.Duration
-}
-
-func (r *mockRunnable) Run(ctx context.Context) (int, any, error) {
-	// Create a new context with a timeout
-
-	done := make(chan struct{})
-	var err error
-
-	go func() {
-		if r.sleep > 0 {
-			// Simulate long-running job
-			time.Sleep(r.sleep)
-		}
-		if r.shouldFail > 0 {
-			r.shouldFail--
-			err = errors.New("failed")
-		}
-		time.Sleep(1 * time.Millisecond)
-		close(done)
-	}()
-
-	// Wait until either the job is done or the timeout expires
-	select {
-	case <-done:
-		if err != nil {
-			return DefaultErrorExitCode, nil, err
-		}
-		return SuccessExitCode, nil, nil
-	case <-ctx.Done():
-
-		if ctx.Err() == context.DeadlineExceeded {
-			// It was a timeout
-			return TimeoutExitCode, nil, fmt.Errorf("timeout")
-		}
-
-		return DefaultErrorExitCode, nil, ctx.Err()
-	}
+func TestNewJob(t *testing.T) {
+	runner := &ShellRunnable{ScriptPath: "path"}
+	job := NewJob[ShellResult]("id1", runner)
+	assert.Equal(t, JobID("id1"), job.GetID())
+	assert.Equal(t, PriorityDefault, job.GetPriority())
 }
 
-func TestJobResourceLimitExceeded(t *testing.T) {
+func TestSetPriority(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetPriority(PriorityHigh)
+	assert.Equal(t, PriorityHigh, job.GetPriority())
+}
 
-	_ = StartResourceMonitoring(1 * time.Second)
-	defer resetResourceStatsForTesting()
+func TestSetAndGetTimeout(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetTimeout(5 * time.Minute)
+	assert.Equal(t, 5*time.Minute, job.GetTimeout())
+}
 
-	j := &job{
-		//ctx:      context.Background(),
-		runnable: &mockRunnable{shouldFail: 0},
-		JobSpecification: JobSpecification{
-			ResourceLimits: ResourceLimits{
-				CPULimit:    0.1,
-				MemoryLimit: 1,
-			},
-		},
-	}
-	j.Run(context.Background())
-	assert.NotNil(t, j.GetLastRun())
+func TestSetAndGetMaxRetries(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetMaxRetries(5)
+	assert.Equal(t, uint(5), job.GetMaxRetries())
 }
 
-func TestJobSuccessful(t *testing.T) {
-	j := &job{
-		runnable: &mockRunnable{shouldFail: 0},
-		//	ctx:      context.Background(),
-	}
+func TestSetAndGetRunnable(t *testing.T) {
+	runner := &ShellRunnable{ScriptPath: "path"}
+	job := NewJob[ShellResult]("id1", runner)
+	assert.Equal(t, runner, job.GetRunnable())
+}
 
-	j.Run(context.Background())
-	assert.NotNil(t, j.GetLastRun())
+func TestSetAndGetRetryDelay(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetRetryDelay(2 * time.Second)
+	assert.Equal(t, 2*time.Second, job.GetRetryDelay())
 }
 
-func TestJobFailed(t *testing.T) {
-	j := &job{runnable: &mockRunnable{shouldFail: 1}}
-	j.Run(context.Background())
-	assert.NotNil(t, j.GetLastRun())
+func TestSetAndGetDependencies(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetDependencies([]JobID{"id2", "id3"})
+	assert.Equal(t, []JobID{"id2", "id3"}, job.GetDependencies())
 }
 
-func TestJobRetry(t *testing.T) {
-	j := &job{
-		runnable: &mockRunnable{shouldFail: 1},
-		JobSpecification: JobSpecification{
-			Retries: 1,
-		},
-	}
-	j.Run(context.Background())
+type TestScheduler struct{}
 
-	assert.NotNil(t, j.GetLastRun())
-	assert.Equal(t, 1, j.Stats.ErrorCount)
+func (s *TestScheduler) Schedule(job *GenericJob, eventBus *EventBus, stopChan chan bool) error {
+	return nil
 }
 
-func TestJobTimeout(t *testing.T) {
-	j := &job{
-		runnable: &mockRunnable{
-			shouldFail: 0,
-			sleep:      4 * time.Millisecond,
-		},
-		JobSpecification: JobSpecification{
-			Timeout: 1 * time.Millisecond,
-		},
-	}
-	j.Run(context.Background())
-	assert.NotNil(t, j.GetLastRun())
+func TestGetID(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	assert.Equal(t, JobID("id1"), job.GetID())
 }
 
-func TestNewJobFromJSON(t *testing.T) {
-	jsonStr := `{"id":"testJob","Priority":1}`
-	job, err := NewJobFromJSON(jsonStr)
+func TestGetRunnable(t *testing.T) {
+	runner := &ShellRunnable{ScriptPath: "path"}
+	job := NewJob[ShellResult]("id1", runner)
+	assert.Equal(t, runner, job.GetRunnable())
+}
 
-	assert.Nil(t, err)
-	assert.Equal(t, "testJob", job.Id.String())
-	assert.Equal(t, 1, job.Priority)
+func TestGetPriority(t *testing.T) {
+	job := NewJob[ShellResult]("id2", &ShellRunnable{})
+	assert.Equal(t, PriorityDefault, job.GetPriority())
 }
 
-func TestToFromJSON(t *testing.T) {
-	j := job{
-		JobSpecification: JobSpecification{
-			Id:       "testJob",
-			Priority: 1,
-		},
-		//ctx: context.Background(),
-	}
+func TestGetTimeout(t *testing.T) {
+	job := NewJob[ShellResult]("id2", &ShellRunnable{})
+	assert.Equal(t, time.Duration(0), job.GetTimeout())
+}
 
-	jsonStr, err := j.ToJSON()
-	assert.Nil(t, err)
+func TestGetMaxRetries(t *testing.T) {
+	job := NewJob[ShellResult]("id2", &ShellRunnable{})
+	assert.Equal(t, uint(0), job.GetMaxRetries())
+}
 
-	var job2 job
-	err = job2.FromJSON(jsonStr)
-	assert.Nil(t, err)
+func TestGetRetryDelay(t *testing.T) {
+	job := NewJob[ShellResult]("id2", &ShellRunnable{})
+	assert.Equal(t, time.Duration(0), job.GetRetryDelay())
+}
 
-	assert.Equal(t, "testJob", job2.Id.String())
+func TestGetDependencies(t *testing.T) {
+	job := NewJob[ShellResult]("id2", &ShellRunnable{})
+	assert.Equal(t, []JobID(nil), job.GetDependencies())
+}
 
+func TestSetDependencies(t *testing.T) {
+	job := NewJob[ShellResult]("id1", &ShellRunnable{})
+	job.SetDependencies([]JobID{"id2", "id3"})
+	assert.Equal(t, []JobID{"id2", "id3"}, job.GetDependencies())
 }
 
-func TestUnmarshalJSON(t *testing.T) {
-	jsonStr := `{"data":{"last_run":"0001-01-01T00:00:00Z","next_run":"0001-01-01T00:00:00Z","stats":{"run_count":0,"success_count":0,"error_count":0,"time_metrics":{"avg":0,"max":0,"min":0,"total":0},"last_error_code":0,"last_success_code":0,"priority_escalates":0,"resource_usage":{"cpu":{"avg":0,"std_dev":0},"memory":{"avg":0,"std_dev":0},"io":{"disk":{"avg":0,"std_dev":0},"network":{"avg":0,"std_dev":0}}}}},"spec":{"id":"testJob","priority":1,"resource_limits":{}}}`
-	var job job
-	err := json.Unmarshal([]byte(jsonStr), &job)
+func TestJobStats(t *testing.T) {
+	scriptContent := `#!/env/bin/bash 
+echo "Hello World"
+`
 
-	assert.Nil(t, err)
-	assert.Equal(t, "testJob", job.Id.String())
-	assert.Equal(t, 1, job.Priority)
-}
+	tmpDir := t.TempDir()
+	tmpFile := "example.sh"
+	tmpPath := path.Join(tmpDir, tmpFile)
 
-func TestNewJob(t *testing.T) {
-	job := newJob(JobSpecification{
-		Id: "testJob",
-	})
-	assert.NotNil(t, job)
-	assert.Equal(t, "testJob", job.Id.String())
-	assert.Equal(t, PriorityDefault, job.Priority)
+	tmpfile, err := os.Create(tmpPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpfile.Write([]byte(scriptContent))
+
+	defer tmpfile.Close()
+
+	job := NewJob[ShellResult]("id1", &ShellRunnable{ScriptPath: tmpfile.Name()})
+
+	// Simulate a successful run
+	result, err := job.Execute(context.Background()) // Assume Execute updates stats and returns no error
+	assert.NoError(t, err)
+	assert.NotNil(t, result)
+
+	stats := job.stats
+
+	assert.Equal(t, 1, stats.RunCount)
+	assert.Equal(t, 1, stats.SuccessCount)
+	assert.Equal(t, 0, stats.ErrorCount)
+	assert.NotEqual(t, 0, stats.TimeMetrics.TotalRunTime)
+	assert.NotEqual(t, 0, stats.TimeMetrics.AvgRunTime)
+	assert.NotEqual(t, 0, stats.TimeMetrics.MaxRunTime)
+	assert.NotEqual(t, 0, stats.TimeMetrics.MinRunTime)
+
+	// Run the job a second time; the script still succeeds, so success counters advance
+	job.Execute(context.Background()) // stats are updated again on this (successful) run
+	stats = job.stats
 
+	assert.Equal(t, 2, stats.RunCount)
+	assert.Equal(t, 2, stats.SuccessCount)
+	assert.Equal(t, 0, stats.ErrorCount)
 }
diff --git a/jobs.go b/jobs.go
deleted file mode 100644
index 0a22b53..0000000
--- a/jobs.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package jobqueue
-
-import (
-	"sync"
-	"time"
-)
-
-type JobsInterface interface {
-	GetJobs() map[JobIDType]ReadOnlyJob
-
-	GetExecutableJobs() map[JobIDType]ReadOnlyJob
-
-	AddJob(jobSpec JobSpecification, runnable Runnable) error
-
-	RemoveJob(id JobIDType) (bool, error)
-
-	GetJobStatus(id JobIDType) (JobStatus, error)
-
-	Cleanup()
-
-	GetFinishedJobs() map[JobIDType]ReadOnlyJob
-
-	GetFinishedJob(id JobIDType) ReadOnlyJob
-
-	RemoveFinishedJob(id JobIDType) (bool, error)
-
-	JobExists(id JobIDType) bool
-
-	GetJob(id JobIDType) ReadOnlyJob
-
-	GetJobsCount() int
-}
-
-type jobs struct {
-	jobs         map[JobIDType]*job
-	finishedJobs map[JobIDType]*job
-	mutex        sync.Mutex
-}
-
-// compile time check if jobs implements JobsInterface
-var _ JobsInterface = (*jobs)(nil)
-
-func (jq *jobs) GetJobsCount() int {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	return len(jq.jobs)
-}
-
-func (jq *jobs) Cleanup() {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	for id, job := range jq.jobs {
-		if job.Status == JobFinished {
-			jq.finishedJobs[id] = job
-			
-			delete(jq.jobs, id)
-		}
-	}
-}
-
-func (jq *jobs) GetFinishedJobs() map[JobIDType]ReadOnlyJob {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	jobs := make(map[JobIDType]ReadOnlyJob)
-	for id, job := range jq.finishedJobs {
-		jobs[id] = job // Implizites Casting zu ReadOnlyJob
-	}
-
-	return jobs
-}
-
-func (jq *jobs) GetFinishedJob(id JobIDType) ReadOnlyJob {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.finishedJobs[id]; !exists {
-		return nil
-	}
-
-	return jq.finishedJobs[id]
-
-}
-
-func (jq *jobs) RemoveFinishedJob(id JobIDType) (bool, error) {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.finishedJobs[id]; !exists {
-		return false, ErrJobNotFound
-	}
-
-	// Update internal data structures.
-	delete(jq.finishedJobs, id)
-	return true, nil
-}
-
-// GetJobs returns a map of all jobs.
-func (jq *jobs) GetJobs() map[JobIDType]ReadOnlyJob {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	jobs := make(map[JobIDType]ReadOnlyJob)
-	for id, job := range jq.jobs {
-		jobs[id] = job // Implizites Casting zu ReadOnlyJob
-	}
-
-	return jobs
-}
-
-// GetJobs returns a map of all jobs.
-func (jq *jobs) GetExecutableJobs() map[JobIDType]ReadOnlyJob {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	jobs := make(map[JobIDType]ReadOnlyJob)
-
-	tempJobs := make(map[JobIDType]*job)
-	for _, job := range jq.jobs {
-
-		if job.Status != JobPending {
-			continue
-		}
-
-		if job.NextRun.After(time.Now()) {
-			continue
-		}
-
-		tempJobs[job.Id] = job
-	}
-
-	sortedJobIDs, err := topologicalSortJobs(tempJobs)
-	if err != nil {
-		return nil
-	}
-
-	for _, id := range sortedJobIDs {
-		job := jq.jobs[id]
-		job.Status = JobScheduled
-		jobs[id] = jq.jobs[id]
-	}
-
-	return jobs
-}
-
-func (jq *jobs) JobExists(id JobIDType) bool {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.jobs[id]; !exists {
-		return false
-	}
-
-	return true
-}
-
-func (jq *jobs) GetJob(id JobIDType) ReadOnlyJob {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.jobs[id]; !exists {
-		return nil
-	}
-
-	return jq.jobs[id]
-
-}
-
-// NewJobs creates a new job queue.
-func NewJobs() *jobs {
-
-	jq := &jobs{
-		jobs:         make(map[JobIDType]*job),
-		finishedJobs: make(map[JobIDType]*job),
-		mutex:        sync.Mutex{},
-	}
-
-	return jq
-}
-
-// AddJob adds a new job to the queue.
-func (jq *jobs) AddJob(jobSpec JobSpecification, runnable Runnable) error {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	job := newJob(jobSpec)
-	job.runnable = runnable
-
-	if _, exists := jq.jobs[job.Id]; exists {
-		return ErrJobAlreadyExists
-	}
-
-	for _, dep := range job.Dependencies {
-		if _, exists := jq.jobs[dep]; !exists {
-			return ErrUnknownDependency
-		}
-	}
-
-	jq.jobs[job.Id] = job
-
-	return nil
-}
-
-// RemoveJob removes a job from the queue.
-func (jq *jobs) RemoveJob(id JobIDType) (bool, error) {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.jobs[id]; !exists {
-		return false, ErrJobNotFound
-	}
-
-	// check if job is a dependency of another job
-	for _, job := range jq.jobs {
-		for _, dep := range job.Dependencies {
-			if dep == id {
-				return false, ErrJobIsDependency
-			}
-		}
-	}
-
-	// Update internal data structures.
-	delete(jq.jobs, id)
-	return true, nil
-}
-
-// GetJobStatus returns the status of a job.
-func (jq *jobs) GetJobStatus(id JobIDType) (JobStatus, error) {
-	jq.mutex.Lock()
-	defer jq.mutex.Unlock()
-
-	if _, exists := jq.jobs[id]; !exists {
-		return JobStatus(0), ErrJobNotFound
-	}
-
-	return jq.jobs[id].Status, nil
-
-}
diff --git a/jobs_test.go b/jobs_test.go
deleted file mode 100644
index ef74250..0000000
--- a/jobs_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package jobqueue
-
-import (
-	"testing"
-)
-
-func TestNewJobQueue(t *testing.T) {
-	jq := NewJobs()
-	if jq == nil {
-		t.Errorf("NewJobs returned nil")
-	}
-}
-
-func TestAddJob(t *testing.T) {
-	jq := NewJobs()
-
-	err := jq.AddJob(JobSpecification{
-		Id:       "1",
-		Priority: 1,
-		//Exclusive:   true,
-		MaxRuns:     3,
-		Concurrency: 2,
-	}, nil)
-	if err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-}
-
-func TestRemoveJob(t *testing.T) {
-	jq := NewJobs()
-
-	err := jq.AddJob(JobSpecification{
-		Id: "1",
-		// Set other fields
-	}, nil)
-	if err != nil {
-		t.Errorf("Failed to add job: %v", err)
-	}
-
-	removed, _ := jq.RemoveJob("1")
-	if !removed {
-		t.Errorf("Failed to remove job")
-	}
-}
diff --git a/json.go b/json.go
deleted file mode 100644
index b946a33..0000000
--- a/json.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package jobqueue
-
-import "github.com/robfig/cron/v3"
-import "encoding/json"
-
-// UnmarshalJSON unmarshals a job from json.
-func (j *job) UnmarshalJSON(data []byte) error {
-	type Alias job
-	aux := &struct {
-		Schedule string `json:"schedule"`
-		*Alias
-	}{
-		Alias: (*Alias)(j),
-	}
-
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-
-	if aux.Schedule != "" {
-		schedule, err := cron.ParseStandard(aux.Schedule)
-		if err != nil {
-			return err
-		}
-		j.scheduleImpl = schedule
-	}
-
-	return nil
-}
-
-// MarshalJSON marshals a job to json.
-func (j *job) MarshalJSON() ([]byte, error) {
-	type Alias job
-	aux := &struct {
-		*Alias
-	}{
-		Alias: (*Alias)(j),
-	}
-
-	return json.Marshal(aux)
-}
-
-// NewJobFromJSON creates a new job from a json string.
-func NewJobFromJSON(jsonStr string) (*job, error) {
-	var job JobSpecification
-	err := json.Unmarshal([]byte(jsonStr), &job)
-	if err != nil {
-		return nil, err
-	}
-	return newJob(job), nil
-}
-
-// ToJSON marshals a job to a json string.
-func (j *job) ToJSON() (string, error) {
-	data, err := json.Marshal(j)
-	if err != nil {
-		return "", err
-	}
-	return string(data), nil
-}
-
-// FromJSON unmarshals a job from a json string.
-func (j *job) FromJSON(jsonStr string) error {
-	return json.Unmarshal([]byte(jsonStr), j)
-}
diff --git a/json_test.go b/json_test.go
deleted file mode 100644
index 36a348f..0000000
--- a/json_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package jobqueue
diff --git a/log-writer.go b/log-writer.go
deleted file mode 100644
index 8879499..0000000
--- a/log-writer.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package jobqueue
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"sync"
-	"text/template"
-)
-
-// Logger ist ein generisches Log-Interface, das verschiedene Ausgabeziele unterstützt.
-type Logger interface {
-	// Write schreibt einen Log-Eintrag.
-	Write(entry JobLog) (n int, err error)
-
-	// Close schließt den Logger.
-	Close() error
-}
-
-// FileLogger is a logger that writes to a file.
-type FileLogger struct {
-	mu           sync.Mutex
-	logDir       string
-	maxLogSize   int64
-	maxLogFiles  int
-	currentLog   *os.File
-	currentSize  int64
-	logFileIndex int
-}
-
-// NewFileLogger creates a new FileLogger.
-func NewFileLogger(logDir string, maxLogSize int64, maxLogFiles int) (*FileLogger, error) {
-	if err := os.MkdirAll(logDir, 0755); err != nil {
-		return nil, err
-	}
-
-	lw := &FileLogger{
-		logDir:       logDir,
-		maxLogSize:   maxLogSize,
-		maxLogFiles:  maxLogFiles,
-		currentSize:  0,
-		logFileIndex: 1,
-	}
-
-	if err := lw.rotateLog(); err != nil {
-		return nil, err
-	}
-
-	return lw, nil
-}
-
-// Write the log entry to the current log file.
-func (lw *FileLogger) Write(entry JobLog) (n int, err error) {
-	lw.mu.Lock()
-	defer lw.mu.Unlock()
-
-	logTemplate := "{{.Timestamp}} {{.JobID}} {{.JobName}} {{.JobStatus}} {{.ExitCode}} {{.Duration}} {{.Message}}"
-	tmpl := template.New("log")
-	tmpl, err = tmpl.Parse(logTemplate)
-	if err != nil {
-		return 0, err
-	}
-
-	buffer := new(bytes.Buffer)
-	tmpl.Execute(buffer, entry)
-
-	n, err = lw.currentLog.Write(buffer.Bytes())
-	lw.currentSize += int64(n)
-
-	if lw.currentSize >= lw.maxLogSize {
-		_ = lw.rotateLog()
-	}
-
-	return n, err
-}
-
-// Close closes the current log file.
-func (lw *FileLogger) Close() error {
-	lw.mu.Lock()
-	defer lw.mu.Unlock()
-	return lw.currentLog.Close()
-}
-
-// rotateLog closes the current log file and opens a new one.
-func (lw *FileLogger) rotateLog() error {
-	lw.currentSize = 0
-
-	if lw.currentLog != nil {
-		if err := lw.currentLog.Close(); err != nil {
-			return err
-		}
-	}
-
-	logFileName := fmt.Sprintf("%s/job_log_%d.log", lw.logDir, lw.logFileIndex)
-	lw.logFileIndex++
-
-	if lw.logFileIndex > lw.maxLogFiles {
-		lw.logFileIndex = 1
-	}
-
-	f, err := os.Create(logFileName)
-	if err != nil {
-		return err
-	}
-
-	lw.currentLog = f
-	return nil
-}
diff --git a/manager.go b/manager.go
index ccf83ec..4b60620 100644
--- a/manager.go
+++ b/manager.go
@@ -1,152 +1,285 @@
 package jobqueue
 
 import (
-	"context"
-	"errors"
+	"fmt"
 	"sync"
-	"time"
 )
 
-type JobManager struct {
-	queue          *jobs
-	executor       *jobExecutor
-	mutex          sync.Mutex
-	maxConcurrency int
-}
// ManagerState represents the lifecycle state of a Manager.
type ManagerState int

const (
	// ManagerStateStopped means no registered worker is running.
	// The constants are explicitly typed as ManagerState; the previous
	// untyped declaration forced conversions like ManagerState(ManagerStateStopped)
	// at use sites.
	ManagerStateStopped ManagerState = iota
	// ManagerStateRunning means at least one worker is running.
	ManagerStateRunning
)
 
-	jq := NewJobs()
-	instance := NewJobExecutor(jq, maxConcurrency, interval,
-		func(executor *jobExecutor) bool {
-			if executor.Ctx.Err() != nil {
-				return true
-			}
// Manager coordinates a set of workers, a dependency-aware job queue,
// an event bus and per-job schedulers. All exported methods take mu,
// so the Manager is safe for concurrent use.
type Manager struct {
	state ManagerState

	queue     *Queue
	workerMap map[WorkerID]Worker
	eventBus  *EventBus
	scheduled map[JobID]Scheduler

	// jobEventCh receives QueueJob/JobReady events while the manager is
	// running; it is created in Start and closed in Stop.
	jobEventCh chan interface{}

	mu sync.Mutex
}
 
-func (jm *JobManager) GetJobs() map[JobIDType]ReadOnlyJob {
-	return jm.queue.GetJobs()
-}
+// NewManager initializes a new Manager
+func NewManager() *Manager {
 
-func (jm *JobManager) AddJob(jobSpec JobSpecification, runnable Runnable) error {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.queue.AddJob(jobSpec, runnable)
-}
+	eventBus := NewEventBus()
 
-func (jm *JobManager) GetJob(id JobIDType) ReadOnlyJob {
-	return jm.queue.GetJob(id)
+	return &Manager{
+		state:     ManagerStateStopped,
+		queue:     NewQueue(eventBus),
+		workerMap: make(map[WorkerID]Worker),
+		eventBus:  eventBus,
+		scheduled: make(map[JobID]Scheduler),
+	}
 }
 
-func (jm *JobManager) JobExists(id JobIDType) bool {
-	return jm.queue.JobExists(id)
+func (m *Manager) GetEventBus() *EventBus {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.eventBus
 }
 
-func (jm *JobManager) RemoveJob(id JobIDType) (bool, error) {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.queue.RemoveJob(id)
-}
+func (m *Manager) checkAndSetRunningState() error {
 
-func (jm *JobManager) Start() error {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.executor.Start()
-}
+	m.state = ManagerStateStopped
 
-func (jm *JobManager) Stop() error {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.executor.Stop()
-}
+	if m.workerMap == nil {
+		return ErrNoWorkers
+	}
 
-func (jm *JobManager) Pause() error {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.executor.Pause()
-}
+	if len(m.workerMap) == 0 {
+		return ErrNoWorkers
+	}
 
-func (jm *JobManager) Resume() error {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.executor.Resume()
-}
+	for _, worker := range m.workerMap {
+		if worker.Status() == WorkerStatusRunning {
+			m.state = ManagerStateRunning
+			return nil
+		}
+	}
 
-func (jm *JobManager) GetFinishedJobs() map[JobIDType]ReadOnlyJob {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.queue.GetFinishedJobs()
+	return nil
 }
 
-func (jm *JobManager) GetFinishedJob(id JobIDType) ReadOnlyJob {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.queue.GetFinishedJob(id)
+// AddWorker adds a worker to the manager
+func (m *Manager) AddWorker(worker Worker) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// check if worker is already added
+	if _, ok := m.workerMap[worker.GetID()]; ok {
+		return ErrWorkerAlreadyAdded
+	}
+
+	// check if state of worker is not running
+	if worker.Status() != WorkerStatusStopped {
+		return ErrWorkerAlreadyRunning
+	}
+
+	if m.state == ManagerStateRunning {
+		err := worker.Start()
+		if err != nil {
+			return err
+		}
+	}
+
+	// add worker to workerMap
+	m.workerMap[worker.GetID()] = worker
+
+	return m.checkAndSetRunningState()
 }
 
-func (jm *JobManager) RemoveFinishedJob(id JobIDType) (bool, error) {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.queue.RemoveFinishedJob(id)
+// RemoveWorker removes a worker from the manager
+func (m *Manager) RemoveWorker(worker Worker) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// check if worker is added
+	if _, ok := m.workerMap[worker.GetID()]; !ok {
+		return ErrWorkerNotAdded
+	}
+
+	// check if state of worker is not running
+	if worker.Status() != WorkerStatusStopped {
+		err := worker.Stop()
+		if err != nil {
+			return err
+		}
+	}
+
+	// remove worker from workerMap
+	delete(m.workerMap, worker.GetID())
+	err := m.checkAndSetRunningState()
+
+	if err != nil && err != ErrNoWorkers {
+		return err
+	}
+
+	return nil
+
 }
 
-func (jm *JobManager) OnStarted(hook func()) error {
+// Start starts the manager
+func (m *Manager) Start() error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
 
-	timeout := 5 * time.Second
-	startTime := time.Now()
+	if m.state == ManagerStateRunning {
+		return ErrManagerAlreadyRunning
+	}
 
-	for {
-		if jm.executor.IsRunning() {
-			hook()
-			return nil
+	if len(m.workerMap) == 0 {
+		return ErrNoWorkers
+	}
+
+	var wrappedErr error
+
+	for _, worker := range m.workerMap {
+		err := worker.Start()
+		if err != nil && err != ErrWorkerAlreadyRunning {
+			if wrappedErr == nil {
+				wrappedErr = fmt.Errorf("Error: ")
+			}
+
+			wrappedErr = fmt.Errorf("%w\n%s", wrappedErr, err.Error())
 		}
+	}
 
-		if time.Since(startTime) >= timeout {
-			return ErrTimeout
+	// check if we have one worker
+	for _, worker := range m.workerMap {
+		if worker.Status() == WorkerStatusRunning {
+			m.state = ManagerStateRunning
+			break
 		}
+	}
+
+	m.jobEventCh = make(chan interface{}, 100)
+	m.eventBus.Subscribe(QueueJob, m.jobEventCh)
+	m.eventBus.Subscribe(JobReady, m.jobEventCh)
+	go m.handleJobEvents()
+
+	err := m.checkAndSetRunningState()
 
-		<-time.After(1 * time.Millisecond)
+	if err != nil {
+		wrappedErr = fmt.Errorf("%w\n%s", wrappedErr, err.Error())
 	}
 
+	return wrappedErr
+
 }
 
-func (jm *JobManager) Wait() error {
+// Stop stops the manager
+func (m *Manager) Stop() error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
 
-	if jm.executor.Ctx.Err() != nil {
-		return jm.executor.Ctx.Err()
+	if m.state == ManagerStateStopped {
+		return ErrManagerAlreadyStopped
 	}
 
-	if jm.executor.IsRunning() {
-		<-jm.executor.Ctx.Done()
+	m.eventBus.Unsubscribe(QueueJob, m.jobEventCh)
+	m.eventBus.Unsubscribe(JobReady, m.jobEventCh)
+	close(m.jobEventCh)
 
-		if errors.Is(jm.executor.Ctx.Err(), context.Canceled) {
-			return nil
+	var wrappedErr error
+
+	for _, worker := range m.workerMap {
+		err := worker.Stop()
+		if err != nil && err != ErrWorkerAlreadyStopped {
+			if wrappedErr == nil {
+				wrappedErr = fmt.Errorf("Error: ")
+			}
+
+			wrappedErr = fmt.Errorf("%w\n%s", wrappedErr, err.Error())
 		}
+	}
 
-		return jm.executor.Ctx.Err()
+	err := m.checkAndSetRunningState()
 
+	if err != nil {
+		wrappedErr = fmt.Errorf("%w\n%s", wrappedErr, err.Error())
 	}
 
-	return ErrNotRunning
+	return wrappedErr
+}
 
+func (m *Manager) handleJobEvents() {
+
+	for event := range m.jobEventCh {
+		switch event := event.(type) {
+		case Event:
+
+			switch event.Name {
+			case QueueJob:
+				job := event.Data.(GenericJob)
+				err := m.queue.Enqueue(job)
+				if err != nil && err != ErrJobAlreadyExists {
+					fmt.Println(err)
+
+				}
+			case JobReady:
+				for {
+					nextJob, err := m.queue.Dequeue()
+					if err != nil {
+						break
+					}
+
+					for _, worker := range m.workerMap {
+						if err := worker.AssignJob(nextJob); err == nil {
+							break
+						}
+					}
+				}
+			}
+		}
+	}
 }
 
-func (jm *JobManager) IsRunning() bool {
-	jm.mutex.Lock()
-	defer jm.mutex.Unlock()
-	return jm.executor.IsRunning()
+// ScheduleJob schedules a job
+func (m *Manager) ScheduleJob(job GenericJob, scheduler Scheduler) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.state != ManagerStateRunning {
+		return ErrManagerNotRunning
+	}
+
+	m.scheduled[job.GetID()] = scheduler
+
+	return scheduler.Schedule(job, m.eventBus)
+}
+
+// CancelJob cancels a scheduled job
+func (m *Manager) CancelJob(id JobID) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.state != ManagerStateRunning {
+		return ErrManagerNotRunning
+	}
+
+	if _, ok := m.scheduled[id]; !ok {
+		return ErrJobNotScheduled
+	}
+
+	scheduler, ok := m.scheduled[id]
+	if !ok {
+		return ErrJobNotScheduled
+	}
+
+	err := scheduler.Cancel(id)
+	if err != nil {
+		return err
+	}
+
+	delete(m.scheduled, id)
+
+	return nil
+
 }
diff --git a/manager_test.go b/manager_test.go
new file mode 100644
index 0000000..fa12cf1
--- /dev/null
+++ b/manager_test.go
@@ -0,0 +1,196 @@
+package jobqueue
+
+import (
+	"context"
+	"github.com/stretchr/testify/assert"
+	"testing"
+	"time"
+)
+
// MockWorker is a minimal Worker implementation for tests; it only
// records the status transitions triggered by Start and Stop.
type MockWorker struct {
	id     WorkerID
	status WorkerStatus
}

// Start marks the worker as running and always succeeds.
func (m *MockWorker) Start() error {
	m.status = WorkerStatusRunning
	return nil
}

// Stop marks the worker as stopped and always succeeds.
func (m *MockWorker) Stop() error {
	m.status = WorkerStatusStopped
	return nil
}

// GetID returns the worker's identifier.
func (m *MockWorker) GetID() WorkerID {
	return m.id
}

// Status reports the worker's current status.
func (m *MockWorker) Status() WorkerStatus {
	return m.status
}

// AssignJob accepts any job without executing it.
func (m *MockWorker) AssignJob(job GenericJob) error {
	return nil
}
+
+type MockScheduler struct{}
+
+func (s *MockScheduler) Schedule(job *GenericJob, eventBus *EventBus, stopChan StopChan) error {
+	return nil
+}
+
+func (s *MockScheduler) Cancel(jobID JobID) error {
+	return nil
+}
+
+func (s *MockScheduler) GetNextRunTime(jobID JobID) time.Time {
+	return time.Now()
+}
+
// MockGenericJob is a minimal GenericJob implementation for tests; only
// the ID carries information, every other accessor returns a zero value.
type MockGenericJob struct {
	ID JobID
}

// GetMaxRetries reports that this job is never retried.
func (m *MockGenericJob) GetMaxRetries() uint {
	return 0
}

// GetRetryDelay reports no delay between retries.
func (m *MockGenericJob) GetRetryDelay() time.Duration {
	return 0
}

// GetTimeout reports no execution timeout.
func (m *MockGenericJob) GetTimeout() time.Duration {
	return 0
}

// GetID returns the job's identifier.
func (m *MockGenericJob) GetID() JobID {
	return m.ID
}

// GetDependencies reports no dependencies.
func (m *MockGenericJob) GetDependencies() []JobID {
	return nil
}

// GetPriority returns the default priority.
func (m *MockGenericJob) GetPriority() Priority {
	return PriorityDefault
}

// Execute does nothing and returns a nil result and no error.
func (m *MockGenericJob) Execute(ctx context.Context) (RunGenericResult, error) {
	return nil, nil
}

// Cancel does nothing and always succeeds.
func (m *MockGenericJob) Cancel() error {
	return nil
}
+
+func TestNewManager(t *testing.T) {
+	eventBus := NewEventBus()
+	manager := NewManager()
+
+	assert.NotNil(t, manager)
+	assert.Equal(t, ManagerState(ManagerStateStopped), manager.state)
+	assert.NotNil(t, manager.queue)
+	assert.NotNil(t, manager.workerMap)
+	assert.NotNil(t, manager.eventBus)
+	assert.Equal(t, eventBus, manager.eventBus)
+}
+
+func TestManager_AddWorker(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+
+	err := m.AddWorker(w)
+	assert.Nil(t, err)
+	assert.Equal(t, int(ManagerStateStopped), int(m.state))
+}
+
+func TestManager_RemoveWorker(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+	m.AddWorker(w)
+
+	err := m.RemoveWorker(w)
+	assert.Nil(t, err)
+	assert.Equal(t, int(ManagerStateStopped), int(m.state))
+}
+
+func TestManager_Start(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+	m.AddWorker(w)
+
+	err := m.Start()
+	assert.Nil(t, err)
+	assert.Equal(t, int(ManagerStateRunning), int(m.state))
+}
+
+func TestManager_Stop(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+	m.AddWorker(w)
+	m.Start()
+
+	err := m.Stop()
+	assert.Nil(t, err)
+	assert.Equal(t, int(ManagerStateStopped), int(m.state))
+}
+
+func TestManager_ScheduleJob(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+	m.AddWorker(w)
+	m.Start()
+
+	job := &MockGenericJob{ID: "job1"}
+	scheduler := InstantScheduler{}
+
+	err := m.ScheduleJob(job, &scheduler)
+	assert.Nil(t, err)
+}
+
+func TestManager_CancelJob(t *testing.T) {
+	m := NewManager()
+	w := &MockWorker{id: "worker1", status: WorkerStatusStopped}
+	m.AddWorker(w)
+	m.Start()
+
+	job := &MockGenericJob{ID: "job1"}
+	scheduler := InstantScheduler{}
+	m.ScheduleJob(job, &scheduler)
+
+	err := m.CancelJob("job1")
+	assert.Nil(t, err)
+}
+
+func TestManagerEventHandling(t *testing.T) {
+	mgr := NewManager()
+	worker := NewLocalWorker(1)
+	err := mgr.AddWorker(worker)
+	assert.Nil(t, err)
+
+	err = mgr.Start()
+	assert.Nil(t, err)
+
+	runner := &CounterRunnable{}
+	job := NewJob[CounterResult]("job1", runner)
+
+	scheduler := &IntervalScheduler{Interval: 1 * time.Millisecond}
+	err = mgr.ScheduleJob(job, scheduler)
+	assert.Nil(t, err)
+
+	startTime := time.Now()
+
+	for {
+		if job.runner.(*CounterRunnable).GetCount() > 10 {
+			break
+		}
+
+		if time.Since(startTime) > 10*time.Second {
+			t.Fatalf("Job did not finish in time")
+		}
+	}
+
+	err = mgr.Stop()
+	assert.Nil(t, err)
+}
diff --git a/manger_test.go b/manger_test.go
deleted file mode 100644
index d7ee287..0000000
--- a/manger_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package jobqueue
-
-import (
-	"github.com/stretchr/testify/assert"
-	//_ "net/http/pprof"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-)
-
-func TestRepeatNewJobManagerPauseAndResume(t *testing.T) {
-	numRepeats := 10 // Anzahl der Wiederholungen
-
-	for i := 0; i < numRepeats; i++ {
-		t.Logf("Repeat %d\n", i+1)
-		dewJobManagerPauseAndResume(t)
-	}
-}
-
-func dewJobManagerPauseAndResume(t *testing.T) {
-
-	var wg sync.WaitGroup
-
-	tickerTime := 1 * time.Microsecond
-	waitBeforePause := 5 * tickerTime
-	doPause := 5 * tickerTime
-	maxRuns := 3
-
-	jm := NewJobManager(1, tickerTime, false)
-	if jm == nil {
-		t.Errorf("NewJobManager returned nil")
-	}
-
-	err := jm.AddJob(JobSpecification{
-		Id:      "1",
-		MaxRuns: maxRuns,
-	}, &ExternalProcessRunner{
-		Command: "sleep",
-		Args:    []string{"1"},
-	})
-
-	assert.Nil(t, err)
-
-	job := jm.GetJob("1")
-	assert.NotNil(t, job)
-
-	var isPaused int32
-
-	err = jm.Start()
-	assert.Nil(t, err)
-
-	// Timer für 5 Sekunden
-	timer := time.NewTimer(10 * time.Second)
-
-	// anonymous function to stop the job manager after the timer has expired
-	go func() {
-		<-timer.C // wait for the timer to expire
-		err := jm.Stop()
-		assert.Nil(t, err)
-	}()
-
-	defer func() {
-		err := jm.Stop()
-		if err != nil {
-			assert.ErrorIs(t, err, ErrAlreadyStopped)
-		}
-	}()
-
-	// Go routine to pause and resume the job manager
-	go func() {
-		time.Sleep(waitBeforePause)
-		err := jm.Pause()
-		assert.Nil(t, err)
-		atomic.StoreInt32(&isPaused, 1)
-
-		time.Sleep(doPause)
-
-		err = jm.Resume()
-		assert.Nil(t, err)
-		atomic.StoreInt32(&isPaused, 0)
-	}()
-
-	wg.Add(1)
-	err = jm.OnStarted(func() {
-		defer wg.Done()
-		err := jm.Wait()
-		assert.Nil(t, err)
-
-		finishedJob := jm.GetFinishedJob("1")
-		assert.NotNil(t, finishedJob)
-
-		paused := atomic.LoadInt32(&isPaused) == 1
-		assert.False(t, paused, "Job manager should not be paused")
-
-	})
-
-	assert.Nil(t, err)
-	wg.Wait()
-
-}
-
-func TestNewJobManager(t *testing.T) {
-
-	doRuns := 5
-
-	jm := NewJobManager(10, 1*time.Microsecond, true)
-	if jm == nil {
-		t.Errorf("NewJobManager returned nil")
-	}
-
-	err := jm.AddJob(JobSpecification{
-		Id:      "1",
-		MaxRuns: doRuns,
-	}, &ExternalProcessRunner{
-		Command: "echo",
-		Args:    []string{"hello"},
-	})
-	assert.Nil(t, err)
-
-	err = jm.Start()
-
-	err = jm.OnStarted(func() {
-
-		assert.Nil(t, err)
-		assert.True(t, jm.IsRunning())
-
-		job := jm.GetJob("1")
-		assert.NotNil(t, job)
-
-		defer func() {
-			err := jm.Stop()
-			if err != nil {
-				assert.ErrorIs(t, err, ErrAlreadyStopped)
-			}
-		}()
-
-		err = jm.Wait()
-		assert.Nil(t, err)
-
-		finishedJob := jm.GetFinishedJob("1")
-		assert.NotNil(t, finishedJob)
-
-		runs := finishedJob.GetRuns()
-		assert.Equal(t, doRuns, runs)
-
-		logs := finishedJob.GetLogs()
-		assert.Equal(t, doRuns, len(logs))
-
-		stats := finishedJob.GetStats()
-		assert.Equal(t, doRuns, stats.SuccessCount)
-		assert.Equal(t, 0, stats.ErrorCount)
-
-	})
-
-	assert.Nil(t, err)
-
-}
diff --git a/prority.go b/prority.go
deleted file mode 100644
index 9efa113..0000000
--- a/prority.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package jobqueue
-
-const (
-	_ int = iota * 10
-	PriorityLow
-	PriorityDefault
-	PriorityHigh
-	PriorityCritical
-)
diff --git a/queue.go b/queue.go
new file mode 100644
index 0000000..5e2f653
--- /dev/null
+++ b/queue.go
@@ -0,0 +1,120 @@
+package jobqueue
+
+import (
+	"sync"
+)
+
// Queue holds jobs and releases them in dependency order.
//   - jobMap: every job ever enqueued, used to resolve dependencies
//   - pendingDependencies: job ID -> dependency IDs not yet enqueued
//   - readyQueue: jobs whose dependencies are met, topologically sorted
//   - processedJobs: IDs of dequeued jobs (kept in jobMap so later
//     enqueues can still see them as satisfied dependencies)
//
// All operations are guarded by mu.
type Queue struct {
	jobMap              map[JobID]GenericJob
	pendingDependencies map[JobID][]JobID
	readyQueue          []GenericJob
	processedJobs       map[JobID]struct{}
	eventBus            *EventBus
	mu                  sync.Mutex
}
+
+func NewQueue(EventBus *EventBus) *Queue {
+	return &Queue{
+		jobMap:              make(map[JobID]GenericJob),
+		pendingDependencies: make(map[JobID][]JobID),
+		readyQueue:          []GenericJob{},
+		processedJobs:       make(map[JobID]struct{}),
+		eventBus:            EventBus,
+	}
+}
+
// Enqueue adds a job to the queue. A job whose dependencies have not
// all been enqueued yet is parked in pendingDependencies until they
// arrive; otherwise it joins the ready queue, which is kept in
// topological order. Returns ErrJobAlreadyExists when the job is still
// waiting in the ready queue, or the topological sort's error when a
// dependency cycle is closed.
func (q *Queue) Enqueue(job GenericJob) error {
	q.mu.Lock()
	defer q.mu.Unlock()

	// Register the job so it can serve as a dependency target. A known
	// ID (e.g. an already-processed job) is deliberately not an error
	// at this point.
	if _, exists := q.jobMap[job.GetID()]; !exists {
		q.jobMap[job.GetID()] = job
		//return ErrJobAlreadyExists
	}

	// Only a job still waiting in the ready queue counts as a duplicate.
	for _, readyJob := range q.readyQueue {
		if readyJob.GetID() == job.GetID() {
			return ErrJobAlreadyExists
		}
	}

	// Check if this job is a dependency for any pending jobs.
	// NOTE(review): jobs promoted to the ready queue here do not get a
	// JobReady event published for them below — confirm that is intended.
	for pendingJobID, pendingDeps := range q.pendingDependencies {
		q.pendingDependencies[pendingJobID] = removeJobID(pendingDeps, job.GetID())
		if len(q.pendingDependencies[pendingJobID]) == 0 {
			q.readyQueue = append(q.readyQueue, q.jobMap[pendingJobID])
			delete(q.pendingDependencies, pendingJobID)
		}
	}

	// Check this job's dependencies; a dependency is "met" once it has
	// been enqueued (present in jobMap), not necessarily processed.
	var unmetDependencies []JobID
	for _, depID := range job.GetDependencies() {
		if _, ok := q.jobMap[depID]; !ok {
			unmetDependencies = append(unmetDependencies, depID)
		}
	}

	if len(unmetDependencies) > 0 {
		// Park the job until all of its dependencies are enqueued.
		q.pendingDependencies[job.GetID()] = unmetDependencies
	} else {
		q.readyQueue = append(q.readyQueue, job)

		// Run topological sort on jobs in the ready queue.
		readyJobList := []GenericJob{}
		for _, readyJob := range q.readyQueue {
			readyJobList = append(readyJobList, readyJob)
		}

		// Include processed jobs so the sort sees their (satisfied)
		// edges and can detect cycles that involve them.
		for id := range q.processedJobs {
			readyJobList = append(readyJobList, q.jobMap[id])
		}

		sortedIDs, err := topologicalSortJobs(readyJobList)
		if err != nil {
			return err
		}

		// Reorder q.readyQueue based on sorted job IDs.
		// NOTE(review): sortedIDs was built from ready AND processed
		// jobs, so the rebuilt ready queue may re-include processed
		// entries — verify topologicalSortJobs filters them, or that
		// re-dispatching processed jobs is intended.
		newReadyQueue := make([]GenericJob, len(sortedIDs))
		for i, id := range sortedIDs {
			newReadyQueue[i] = q.jobMap[id]
		}
		q.readyQueue = newReadyQueue

		if q.eventBus != nil && len(q.readyQueue) > 0 {
			q.eventBus.Publish(JobReady, job.GetID())
		}

	}

	return nil
}
+
+func (q *Queue) Dequeue() (GenericJob, error) {
+	q.mu.Lock()
+	defer q.mu.Unlock()
+
+	if len(q.readyQueue) == 0 {
+		return nil, ErrQueueEmpty
+	}
+
+	job := q.readyQueue[0]
+	q.readyQueue = q.readyQueue[1:]
+
+	// Mark the job as processed but keep it in the jobMap for dependency resolution
+	q.processedJobs[job.GetID()] = struct{}{}
+
+	return job, nil
+}
+
+func removeJobID(deps []JobID, id JobID) []JobID {
+	for i, dep := range deps {
+		if dep == id {
+			deps[i] = deps[len(deps)-1]
+			return deps[:len(deps)-1]
+		}
+	}
+
+	return deps
+}
diff --git a/queue_test.go b/queue_test.go
new file mode 100644
index 0000000..301de56
--- /dev/null
+++ b/queue_test.go
@@ -0,0 +1,220 @@
+package jobqueue
+
+import (
+	"testing"
+)
+
+func TestEnqueueJobAlreadyExists(t *testing.T) {
+	runner := &DummyRunnable{}
+	job := NewJob[DummyResult](JobID("1"), runner)
+	q := NewQueue(nil)
+
+	_ = q.Enqueue(job)
+	err := q.Enqueue(job)
+	if err != ErrJobAlreadyExists {
+		t.Fatalf("Expected ErrJobAlreadyExists, got %v", err)
+	}
+}
+
+func TestEnqueueAndDequeue(t *testing.T) {
+
+	runner := &DummyRunnable{}
+
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job1.SetPriority(PriorityHigh)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+	_ = q.Enqueue(job1)
+	_ = q.Enqueue(job2)
+	dequeuedJob, err := q.Dequeue()
+	if err != nil || dequeuedJob.GetID() != JobID("1") {
+		t.Fatalf("Unexpected dequeue result: jobID %s, err %v", dequeuedJob.GetID(), err)
+	}
+}
+
+func TestEnqueueAndDequeue2(t *testing.T) {
+
+	runner := &DummyRunnable{}
+
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+
+	job2.AddDependency(JobID("1"))
+
+	_ = q.Enqueue(job1)
+	_ = q.Enqueue(job2)
+	dequeuedJob, err := q.Dequeue()
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	if dequeuedJob.GetID() != JobID("1") {
+		t.Fatalf("Unexpected dequeue result: jobID %s", dequeuedJob.GetID())
+	}
+}
+
+func TestDependencyResolution(t *testing.T) {
+	runner := &DummyRunnable{}
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+	job3 := NewJob[DummyResult](JobID("3"), runner)
+
+	_ = q.Enqueue(job3)
+	_ = q.Enqueue(job2)
+	_ = q.Enqueue(job1)
+
+	contains := func(arr []JobID, id JobID) bool {
+		for _, v := range arr {
+			if v == id {
+				return true
+			}
+		}
+		return false
+	}
+
+	possibleJobIDs := []JobID{"1", "2", "3"}
+
+	job, _ := q.Dequeue()
+	if !contains(possibleJobIDs, job.GetID()) {
+		t.Fatalf("Expected jobID in %v, got %s", possibleJobIDs, job.GetID())
+	}
+
+	// remove jobID from possibleJobIDs
+	for i, v := range possibleJobIDs {
+		if v == job.GetID() {
+			possibleJobIDs = append(possibleJobIDs[:i], possibleJobIDs[i+1:]...)
+		}
+	}
+
+	job, _ = q.Dequeue()
+	if !contains(possibleJobIDs, job.GetID()) {
+		t.Fatalf("Expected jobID in %v, got %s", possibleJobIDs, job.GetID())
+	}
+
+	// remove jobID from possibleJobIDs
+	for i, v := range possibleJobIDs {
+		if v == job.GetID() {
+			possibleJobIDs = append(possibleJobIDs[:i], possibleJobIDs[i+1:]...)
+		}
+	}
+
+	job, _ = q.Dequeue()
+	if !contains(possibleJobIDs, job.GetID()) {
+		t.Fatalf("Expected jobID in %v, got %s", possibleJobIDs, job.GetID())
+	}
+
+	// remove jobID from possibleJobIDs
+	for i, v := range possibleJobIDs {
+		if v == job.GetID() {
+			possibleJobIDs = append(possibleJobIDs[:i], possibleJobIDs[i+1:]...)
+		}
+	}
+
+	if len(possibleJobIDs) != 0 {
+		t.Fatalf("Expected no jobIDs left in %v", possibleJobIDs)
+	}
+
+}
+
+func TestDequeueEmptyQueue(t *testing.T) {
+	q := NewQueue(nil)
+	_, err := q.Dequeue()
+	if err != ErrQueueEmpty {
+		t.Fatalf("Expected ErrQueueEmpty, got %v", err)
+	}
+}
+
+func TestProcessedJobs(t *testing.T) {
+	q := NewQueue(nil)
+	runner := &DummyRunnable{}
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+
+	_ = q.Enqueue(job1)
+	_, _ = q.Dequeue()
+	_ = q.Enqueue(job2)
+
+	_, err := q.Dequeue()
+	if err != nil {
+		t.Fatalf("Dequeue failed: %v", err)
+	}
+
+	if _, exists := q.processedJobs[job1.GetID()]; !exists {
+		t.Fatalf("Job 1 not marked as processed")
+	}
+}
+
+func TestCyclicDependencies(t *testing.T) {
+	runner := &DummyRunnable{}
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+	job3 := NewJob[DummyResult](JobID("3"), runner)
+
+	job1.AddDependency(JobID("2"))
+	job2.AddDependency(JobID("3"))
+	job3.AddDependency(JobID("1"))
+
+	err := q.Enqueue(job1)
+	if err != nil {
+		t.Fatalf("Enqueue failed: %v", err)
+	}
+
+	err = q.Enqueue(job2)
+	if err != nil {
+		t.Fatalf("Enqueue failed: %v", err)
+	}
+
+	err = q.Enqueue(job3)
+	if err == nil || err != ErrCycleDetected {
+		t.Fatalf("Expected ErrCyclicDependency, got %v", err)
+	}
+}
+
+func TestDuplicateDependencies(t *testing.T) {
+	runner := &DummyRunnable{}
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+	job2 := NewJob[DummyResult](JobID("2"), runner)
+
+	job2.AddDependency(JobID("1"))
+	job2.AddDependency(JobID("1"))
+
+	_ = q.Enqueue(job1)
+	err := q.Enqueue(job2)
+	if err != nil {
+		t.Fatalf("Enqueue failed: %v", err)
+	}
+
+	_, err = q.Dequeue()
+	if err != nil {
+		t.Fatalf("Dequeue failed: %v", err)
+	}
+
+	_, err = q.Dequeue()
+	if err != nil {
+		t.Fatalf("Dequeue failed: %v", err)
+	}
+
+	if len(q.processedJobs) != 2 {
+		t.Fatalf("Expected 2 processed jobs, got %d", len(q.processedJobs))
+	}
+
+}
+
+func TestJobWithSelfAsDependency(t *testing.T) {
+	runner := &DummyRunnable{}
+	q := NewQueue(nil)
+	job1 := NewJob[DummyResult](JobID("1"), runner)
+
+	job1.AddDependency(JobID("1"))
+
+	err := q.Enqueue(job1)
+	if err == nil || err != ErrCycleDetected {
+		t.Fatalf("Expected ErrCycleDetected, got %v", err)
+	}
+}
+
+// Continue with other test cases...
diff --git a/runnable-counter.go b/runnable-counter.go
new file mode 100644
index 0000000..780768c
--- /dev/null
+++ b/runnable-counter.go
@@ -0,0 +1,38 @@
+package jobqueue
+
+import (
+	"sync"
+)
+
+// CounterResult is a result of a counter
+type CounterResult struct {
+	Count int
+}
+
+// CounterRunnable is a runnable that counts
+type CounterRunnable struct {
+	Count int
+	mu    sync.Mutex
+}
+
+// GetCount returns the current count
+func (c *CounterRunnable) GetCount() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.Count
+}
+
+// Run runs the counter
+func (c *CounterRunnable) Run() (RunResult[CounterResult], error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.Count++
+
+	return RunResult[CounterResult]{
+		Status: ResultStatusSuccess,
+		Data: CounterResult{
+			Count: c.Count,
+		},
+	}, nil
+}
diff --git a/runnable-counter_test.go b/runnable-counter_test.go
new file mode 100644
index 0000000..55d28ad
--- /dev/null
+++ b/runnable-counter_test.go
@@ -0,0 +1,24 @@
+package jobqueue
+
+import (
+	"testing"
+)
+
+func TestRunnableCounter(t *testing.T) {
+
+	runner := &CounterRunnable{}
+
+	r, err := runner.Run()
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	if r.Status != ResultStatusSuccess {
+		t.Errorf("Unexpected result status: %v", r.Status)
+	}
+
+	if r.Data.Count != 1 {
+		t.Errorf("Unexpected result data: %v", r.Data)
+	}
+
+}
diff --git a/runnable-dummy.go b/runnable-dummy.go
new file mode 100644
index 0000000..6e86990
--- /dev/null
+++ b/runnable-dummy.go
@@ -0,0 +1,13 @@
+package jobqueue
+
+// DummyResult is a dummy result
+type DummyResult struct {
+}
+
+type DummyRunnable struct{}
+
+func (d *DummyRunnable) Run() (RunResult[DummyResult], error) {
+	return RunResult[DummyResult]{
+		Status: ResultStatusSuccess,
+	}, nil
+}
diff --git a/runnable-dummy_test.go b/runnable-dummy_test.go
new file mode 100644
index 0000000..b056ba1
--- /dev/null
+++ b/runnable-dummy_test.go
@@ -0,0 +1,16 @@
+package jobqueue
+
+import (
+	"testing"
+)
+
+func TestDummyRunnable(t *testing.T) {
+	
+	runner := &DummyRunnable{}
+
+	_, err := runner.Run()
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+}
diff --git a/runnable-fileoperation.go b/runnable-fileoperation.go
new file mode 100644
index 0000000..1aa1e0c
--- /dev/null
+++ b/runnable-fileoperation.go
@@ -0,0 +1,99 @@
+package jobqueue
+
+import (
+	"os"
+)
+
// FileOperationResult reports the outcome of a file operation.
type FileOperationResult struct {
	Success bool
	Content string // Optional, depending on the operation (filled by "read")
}

// Supported values for FileOperationRunnable.Operation.
const (
	FileOperationRead   = "read"
	FileOperationWrite  = "write"
	FileOperationDelete = "delete"
	FileOperationAppend = "append"
	FileOperationCreate = "create"
)

// FileOperationRunnable performs a single file-system operation when run.
type FileOperationRunnable struct {
	Operation string // e.g. "read", "write", "delete"
	FilePath  string
	Content   string // Optional, depending on the operation
}
+
+func (f *FileOperationRunnable) Run() (RunResult[FileOperationResult], error) {
+	switch f.Operation {
+	case FileOperationRead:
+		content, err := os.ReadFile(f.FilePath)
+		if err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+		return RunResult[FileOperationResult]{
+			Status: ResultStatusSuccess,
+			Data: FileOperationResult{
+				Success: true,
+				Content: string(content),
+			},
+		}, nil
+	case FileOperationWrite:
+		err := os.WriteFile(f.FilePath, []byte(f.Content), 0644)
+		if err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+		return RunResult[FileOperationResult]{
+			Status: ResultStatusSuccess,
+			Data: FileOperationResult{
+				Success: true,
+			},
+		}, nil
+	case FileOperationDelete:
+		err := os.Remove(f.FilePath)
+		if err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+		return RunResult[FileOperationResult]{
+			Status: ResultStatusSuccess,
+			Data: FileOperationResult{
+				Success: true,
+			},
+		}, nil
+
+	case FileOperationAppend:
+		fp, err := os.OpenFile(f.FilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+		if err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+
+		defer fp.Close()
+
+		if _, err := fp.WriteString(f.Content); err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+
+		return RunResult[FileOperationResult]{
+			Status: ResultStatusSuccess,
+			Data: FileOperationResult{
+				Success: true,
+			},
+		}, nil
+
+	case FileOperationCreate:
+		f, err := os.Create(f.FilePath)
+		if err != nil {
+			return RunResult[FileOperationResult]{Status: ResultStatusFailed}, err
+		}
+		defer f.Close()
+
+		return RunResult[FileOperationResult]{
+			Status: ResultStatusSuccess,
+			Data: FileOperationResult{
+				Success: true,
+			},
+		}, nil
+
+	default:
+		return RunResult[FileOperationResult]{Status: ResultStatusFailed}, ErrUnsupportedFileOption
+	}
+}
diff --git a/runnable-fileoperation_test.go b/runnable-fileoperation_test.go
new file mode 100644
index 0000000..c62c91f
--- /dev/null
+++ b/runnable-fileoperation_test.go
@@ -0,0 +1,67 @@
+package jobqueue
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+)
+
+func TestFileOperationRunnable(t *testing.T) {
+
+	dir := t.TempDir()
+	err := os.Chdir(dir)
+	if err != nil {
+		t.Fatalf("Failed to change directory: %v", err)
+	}
+
+	testFilePath := path.Join(dir, "test.txt")
+	testContent := "Hello, World!"
+
+	// Test FileOperationCreate
+	createRunner := FileOperationRunnable{Operation: FileOperationCreate, FilePath: testFilePath}
+	_, err = createRunner.Run()
+	if err != nil {
+		t.Fatalf("Failed to create file: %v", err)
+	}
+
+	// Test FileOperationWrite
+	writeRunner := FileOperationRunnable{Operation: FileOperationWrite, FilePath: testFilePath, Content: testContent}
+	_, err = writeRunner.Run()
+	if err != nil {
+		t.Fatalf("Failed to write to file: %v", err)
+	}
+
+	// Test FileOperationRead
+	readRunner := FileOperationRunnable{Operation: FileOperationRead, FilePath: testFilePath}
+	result, err := readRunner.Run()
+	if err != nil || result.Data.Content != testContent {
+		t.Fatalf("Failed to read from file: %v", err)
+	}
+
+	// Test FileOperationAppend
+	appendContent := " Appended."
+	appendRunner := FileOperationRunnable{Operation: FileOperationAppend, FilePath: testFilePath, Content: appendContent}
+	_, err = appendRunner.Run()
+	if err != nil {
+		t.Fatalf("Failed to append to file: %v", err)
+	}
+
+	// Re-verify content after append
+	updatedContent, _ := ioutil.ReadFile(testFilePath)
+	if string(updatedContent) != testContent+appendContent {
+		t.Fatalf("Append operation failed.")
+	}
+
+	// Test FileOperationDelete
+	deleteRunner := FileOperationRunnable{Operation: FileOperationDelete, FilePath: testFilePath}
+	_, err = deleteRunner.Run()
+	if err != nil {
+		t.Fatalf("Failed to delete file: %v", err)
+	}
+
+	// Verify the file is deleted
+	if _, err := os.Stat(testFilePath); !os.IsNotExist(err) {
+		t.Fatalf("File deletion failed.")
+	}
+}
diff --git a/runnable-gorm.go b/runnable-gorm.go
new file mode 100644
index 0000000..53cb581
--- /dev/null
+++ b/runnable-gorm.go
@@ -0,0 +1,55 @@
+package jobqueue
+
+import (
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
// DBResult is a result of a db query
type DBResult struct {
	// RowsAffected is the number of rows changed by the executed statement.
	RowsAffected int
}

// DBRunnable executes a single SQL statement against a database selected by
// driver type and DSN.
type DBRunnable struct {
	Type  string // database driver type; currently only "mysql" is supported
	DSN   string // data source name used to open the connection
	Query string // SQL statement passed verbatim to gorm's Exec
	db    *gorm.DB // internal for testing
}
+
+func (d *DBRunnable) Run() (RunResult[DBResult], error) {
+	var db *gorm.DB
+	var err error
+
+	if d.db == nil {
+
+		switch d.Type {
+		case "mysql":
+			db, err = gorm.Open(mysql.Open(d.DSN), &gorm.Config{})
+
+		default:
+			return RunResult[DBResult]{Status: ResultStatusFailed}, ErrUnsupportedDatabaseType
+		}
+	} else {
+		db = d.db
+	}
+
+	if err != nil {
+		return RunResult[DBResult]{Status: ResultStatusFailed}, err
+	}
+
+	var result *gorm.DB
+
+	result = db.Exec(d.Query)
+
+	if result.Error != nil {
+		return RunResult[DBResult]{Status: ResultStatusFailed}, result.Error
+	}
+
+	return RunResult[DBResult]{
+		Status: ResultStatusSuccess,
+		Data: DBResult{
+			RowsAffected: int(result.RowsAffected),
+		},
+	}, nil
+}
diff --git a/runnable-gorm_test.go b/runnable-gorm_test.go
new file mode 100644
index 0000000..e124a0e
--- /dev/null
+++ b/runnable-gorm_test.go
@@ -0,0 +1,46 @@
+package jobqueue
+
+import (
+	"github.com/DATA-DOG/go-sqlmock"
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+	"testing"
+)
+
+func TestDBRunnable_Run(t *testing.T) {
+	// Mock-DB erstellen
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Could not create mock: %s", err)
+	}
+	gormDB, _ := gorm.Open(mysql.New(mysql.Config{
+		Conn:                      db,
+		SkipInitializeWithVersion: true,
+	}), &gorm.Config{})
+
+	//existsRows := sqlmock.NewRows([]string{"exists"}).
+	//	AddRow(true)
+
+	//mock.ExpectQuery("SELECT EXISTS \\( SELECT 1 FROM information_schema\\.tables WHERE table_schema = 'public' AND table_name = 'myTable3' \\);").
+	//	WillReturnRows(existsRows)
+
+	mock.ExpectExec("SELECT \\* FROM table_name").WillReturnResult(sqlmock.NewResult(1, 1))
+
+	// Erstellen Sie die zu testende Instanz
+	runnable := &DBRunnable{
+		Type:  "mysql",
+		db:    gormDB, // Injizierte Mock-DB
+		Query: "SELECT * FROM table_name",
+	}
+
+	// Rufen Sie die Run()-Methode auf und überprüfen Sie die Ergebnisse
+	result, err := runnable.Run()
+
+	// Überprüfungen hier
+	if err != nil {
+		t.Fatalf("Failed to run: %s", err)
+	}
+	if result.Status != ResultStatusSuccess {
+		t.Fatalf("Expected success, got: %d", result.Status)
+	}
+}
diff --git a/runnable-http.go b/runnable-http.go
new file mode 100644
index 0000000..b1f498a
--- /dev/null
+++ b/runnable-http.go
@@ -0,0 +1,53 @@
+package jobqueue
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+)
+
// HTTPResult is a result of a http request
type HTTPResult struct {
	StatusCode int    // HTTP status code of the response (e.g. 200)
	Body       string // full response body, read into memory
}

// HTTPRunnable performs a single HTTP request described by its fields.
type HTTPRunnable struct {
	URL    string            // target URL
	Method string            // HTTP method, e.g. "GET" or "POST"
	Header map[string]string // request headers, applied via Header.Set
	Body   string            // request body, sent as-is
}
+
+func (h *HTTPRunnable) Run() (RunResult[HTTPResult], error) {
+	client := &http.Client{}
+
+	reqBody := bytes.NewBufferString(h.Body)
+	req, err := http.NewRequest(h.Method, h.URL, reqBody)
+	if err != nil {
+		return RunResult[HTTPResult]{Status: ResultStatusFailed}, err
+	}
+
+	for key, value := range h.Header {
+		req.Header.Set(key, value)
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return RunResult[HTTPResult]{Status: ResultStatusFailed}, err
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return RunResult[HTTPResult]{Status: ResultStatusFailed}, err
+	}
+
+	return RunResult[HTTPResult]{
+		Status: ResultStatusSuccess,
+		Data: HTTPResult{
+			StatusCode: resp.StatusCode,
+			Body:       string(body),
+		},
+	}, nil
+}
diff --git a/runnable-http_test.go b/runnable-http_test.go
new file mode 100644
index 0000000..e867793
--- /dev/null
+++ b/runnable-http_test.go
@@ -0,0 +1,36 @@
+package jobqueue
+
+import (
+	"github.com/stretchr/testify/assert"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+func TestHTTPRunnable_Run(t *testing.T) {
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		_, _ = w.Write([]byte("Hello, world!"))
+	}))
+	defer server.Close()
+
+	httpRunnable := &HTTPRunnable{
+		URL:    server.URL,
+		Method: "GET",
+		Header: map[string]string{
+			"Content-Type": "application/json",
+		},
+		Body: "",
+	}
+
+	result, err := httpRunnable.Run()
+
+	// Assertions
+	assert.NoError(t, err)
+	assert.Equal(t, ResultStatusSuccess, result.Status)
+	assert.IsType(t, HTTPResult{}, result.Data)
+
+	httpResult := result.Data
+	assert.Equal(t, 200, httpResult.StatusCode)
+	assert.Equal(t, "Hello, world!", httpResult.Body)
+}
diff --git a/runnable-mail.go b/runnable-mail.go
new file mode 100644
index 0000000..9927096
--- /dev/null
+++ b/runnable-mail.go
@@ -0,0 +1,79 @@
+package jobqueue
+
+import (
+	"net/smtp"
+)
+
// MailResult is a result of a email
type MailResult struct {
	Sent           bool   // true once the message body was accepted by the server
	ServerReply    string // server's final reply text, when captured
	SmtpStatusCode uint   // status code of the server's final reply, when captured
}

// MailRunnable sends a single email through an SMTP server, optionally
// authenticating with PLAIN auth.
type MailRunnable struct {
	To       string
	From     string
	Subject  string
	Body     string
	Server   string // SMTP host, without port
	Port     string // SMTP port as a string; joined with Server
	Username string // optional; auth is skipped when Username or Password is empty
	Password string // optional; auth is skipped when Username or Password is empty
	Headers  map[string]string // extra headers appended after From/To/Subject
}
+
+func (m *MailRunnable) Run() (RunResult[MailResult], error) {
+
+	smtpServer := m.Server + ":" + m.Port
+
+	// Connect to the remote SMTP server.
+	client, err := smtp.Dial(smtpServer)
+	if err != nil {
+		return RunResult[MailResult]{Status: ResultStatusFailed}, err
+	}
+
+	if client != nil {
+		defer client.Close()
+	}
+
+	if m.Username != "" && m.Password != "" {
+		if err := client.Auth(smtp.PlainAuth("", m.Username, m.Password, m.Server)); err != nil {
+			return RunResult[MailResult]{Status: ResultStatusFailed}, err
+		}
+	}
+
+	// To && From.
+	if err := client.Mail(m.From); err != nil {
+		return RunResult[MailResult]{Status: ResultStatusFailed}, err
+	}
+	if err := client.Rcpt(m.To); err != nil {
+		return RunResult[MailResult]{Status: ResultStatusFailed}, err
+	}
+
+	// Headers and Data.
+	writer, err := client.Data()
+	if err != nil {
+		return RunResult[MailResult]{Status: ResultStatusFailed}, err
+	}
+
+	headers := "From: " + m.From + "\r\n"
+	headers += "To: " + m.To + "\r\n"
+	headers += "Subject: " + m.Subject + "\r\n"
+
+	for key, value := range m.Headers {
+		headers += key + ": " + value + "\r\n"
+	}
+
+	_, err = writer.Write([]byte(headers + "\r\n" + m.Body))
+	if err != nil {
+		return RunResult[MailResult]{Status: ResultStatusFailed}, err
+	}
+
+	_ = writer.Close()
+
+	// Quit and get the SMTP status code.
+	smtpStatusCode, _ := client.Text.Cmd("QUIT")
+
+	return RunResult[MailResult]{Status: ResultStatusSuccess, Data: MailResult{Sent: true, SmtpStatusCode: smtpStatusCode}}, nil
+}
diff --git a/runnable-mail_test.go b/runnable-mail_test.go
new file mode 100644
index 0000000..65e814e
--- /dev/null
+++ b/runnable-mail_test.go
@@ -0,0 +1,186 @@
+package jobqueue
+
+import (
+	"context"
+	"fmt"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/nat"
+	"github.com/stretchr/testify/assert"
+	"net"
+	"testing"
+	"time"
+)
+
+func startTestSMTPDockerImageAndContainer(t *testing.T, host string, port string, ctx context.Context) error {
+	t.Helper()
+
+	cli, err := client.NewClientWithOpts(client.WithVersion("1.41"))
+	if err != nil {
+		return err
+	}
+
+	imageName := "axllent/mailpit"
+
+	reader, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
+	if err != nil {
+		return err
+	}
+
+	// if debug image pull, comment out the following lines
+	//_, _ = io.Copy(os.Stdout, reader)
+	_ = reader
+
+	hostConfig := &container.HostConfig{
+		PortBindings: nat.PortMap{
+			"1025/tcp": []nat.PortBinding{
+				{
+					HostIP:   host,
+					HostPort: port,
+				},
+			},
+			"8025/tcp": []nat.PortBinding{
+				{
+					HostIP:   host,
+					HostPort: "8025",
+				},
+			},
+		},
+	}
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image: imageName,
+	}, hostConfig, nil, nil, "")
+
+	if err != nil {
+		return err
+	}
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return err
+	}
+
+	go func() {
+		<-ctx.Done()
+
+		timeout := 0
+		stopOptions := container.StopOptions{
+			Timeout: &timeout,
+			Signal:  "SIGKILL",
+		}
+		newCtx, _ := context.WithTimeout(context.Background(), 20*time.Second)
+		if err := cli.ContainerStop(newCtx, resp.ID, stopOptions); err != nil {
+			t.Errorf("ContainerStop returned error: %v", err)
+		}
+		if err := cli.ContainerRemove(newCtx, resp.ID, types.ContainerRemoveOptions{
+			Force: true,
+		}); err != nil {
+			t.Errorf("ContainerRemove returned error: %v", err)
+		}
+
+	}()
+
+	statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
+	select {
+	case err := <-errCh:
+		if err != nil {
+			// empty error means container exited normally (see container_wait.go)
+			if err.Error() == "" {
+				return nil
+			}
+
+			return err
+		}
+	case <-statusCh:
+
+	}
+
+	return nil
+}
+
+func TestMailRunner(t *testing.T) {
+	ctb := context.Background()
+	ctx, cancel := context.WithCancel(ctb)
+	t.Cleanup(func() {
+		cancel()
+		time.Sleep(1 * time.Second)
+	})
+
+	host := "127.0.0.1"
+
+	listener, err := net.Listen("tcp", host+":0")
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+		return
+	}
+	portAsInt := listener.Addr().(*net.TCPAddr).Port
+	portAsString := fmt.Sprintf("%d", portAsInt)
+	_ = listener.Close()
+
+	done := make(chan bool)
+	go func() {
+		err = startTestSMTPDockerImageAndContainer(t, host, portAsString, ctx)
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+			cancel()
+		}
+		done <- true
+	}()
+
+	waitCtx, waitCancel := context.WithTimeout(ctx, 30*time.Second)
+	defer waitCancel()
+	for {
+		conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, portAsString), 1*time.Second)
+		if err == nil {
+			err = conn.Close()
+			assert.Nil(t, err)
+			break
+		}
+		select {
+		case <-waitCtx.Done():
+			t.Error("Timeout waiting for container service")
+			cancel()
+			return
+		default:
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	time.Sleep(1 * time.Second)
+
+	mailRunnable := &MailRunnable{
+		To:       "to@example.com",
+		From:     "from@example.com",
+		Subject:  "this is a test",
+		Body:     "this is the body",
+		Server:   host,
+		Port:     portAsString,
+		Username: "",
+		Password: "",
+		Headers: map[string]string{
+			"X-Test": "test",
+		},
+	}
+
+	result, err := mailRunnable.Run()
+
+	// Assertions
+	assert.NoError(t, err)
+	assert.Equal(t, ResultStatusSuccess, result.Status)
+	assert.IsType(t, MailResult{}, result.Data)
+
+	// check result.Data contains 4 files
+	mailResult := result.Data.Sent
+	assert.Equal(t, true, mailResult)
+
+	cancel()
+
+	select {
+	case <-done:
+		time.Sleep(1 * time.Second)
+	case <-time.After(1 * time.Minute):
+		t.Error("test hangs, timeout reached")
+	}
+
+}
diff --git a/runnable-sftp.go b/runnable-sftp.go
new file mode 100644
index 0000000..c57400c
--- /dev/null
+++ b/runnable-sftp.go
@@ -0,0 +1,209 @@
+package jobqueue
+
+import (
+	"fmt"
+	"github.com/pkg/sftp"
+	"golang.org/x/crypto/ssh"
+	"io"
+	"os"
+)
+
// SFTPResult is a result of a sftp
type SFTPResult struct {
	// FilesCopied lists the destination paths of all transferred files.
	FilesCopied []string
}

// Supported values for SFTPRunnable.CredentialType.
const (
	CredentialTypePassword = "password"
	CredentialTypeKey      = "key"
)

// Direction selects which way files are transferred.
type Direction string

const (
	LocalToRemote Direction = "LocalToRemote"
	RemoteToLocal Direction = "RemoteToLocal"
)

// SFTPRunnable copies the regular files in SrcDir to DstDir over SFTP, in
// the direction given by TransferDirection.
type SFTPRunnable struct {
	Host              string
	Port              int
	User              string
	Insecure          bool   // skip host key verification when no HostKey is set
	Credential        string // password or private key material, per CredentialType
	CredentialType    string // CredentialTypePassword or CredentialTypeKey
	HostKey           string // optional expected host key; parsed with ssh.ParsePublicKey
	SrcDir            string
	DstDir            string
	TransferDirection Direction
}
+
+func (s *SFTPRunnable) Run() (RunResult[SFTPResult], error) {
+
+	var authMethod ssh.AuthMethod
+
+	// Auth
+	switch s.CredentialType {
+	case CredentialTypePassword:
+		authMethod = ssh.Password(s.Credential)
+	case CredentialTypeKey:
+		key, err := ssh.ParsePrivateKey([]byte(s.Credential))
+		if err != nil {
+			return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+		}
+		authMethod = ssh.PublicKeys(key)
+	default:
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, ErrUnsupportedCredentialType
+	}
+
+	var hkCallback ssh.HostKeyCallback
+
+	if s.HostKey != "" {
+
+		hostkeyBytes := []byte(s.HostKey)
+		hostKey, err := ssh.ParsePublicKey(hostkeyBytes)
+		if err != nil {
+			return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+		}
+
+		hkCallback = ssh.FixedHostKey(hostKey)
+	} else {
+		if s.Insecure {
+			hkCallback = ssh.InsecureIgnoreHostKey()
+		} else {
+			hkCallback = ssh.FixedHostKey(nil)
+		}
+	}
+
+	config := &ssh.ClientConfig{
+		User: s.User,
+		Auth: []ssh.AuthMethod{
+			authMethod,
+		},
+		HostKeyCallback: hkCallback,
+	}
+
+	client, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", s.Host, s.Port), config)
+	if err != nil {
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+	}
+	defer client.Close()
+
+	sftpClient, err := sftp.NewClient(client)
+	if err != nil {
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+	}
+	defer sftpClient.Close()
+
+	var filesCopied []string
+
+	switch s.TransferDirection {
+	case LocalToRemote:
+		filesCopied, err = s.copyLocalToRemote(sftpClient)
+	case RemoteToLocal:
+		filesCopied, err = s.copyRemoteToLocal(sftpClient)
+	default:
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, ErrUnsupportedTransferDirection
+	}
+
+	if err != nil {
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+	}
+
+	if err != nil {
+		return RunResult[SFTPResult]{Status: ResultStatusFailed}, err
+	}
+
+	return RunResult[SFTPResult]{Status: ResultStatusSuccess, Data: SFTPResult{FilesCopied: filesCopied}}, nil
+}
+
+func copyFile(src io.Reader, dst io.Writer) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
+
+func (s *SFTPRunnable) copyLocalToRemote(sftpClient *sftp.Client) ([]string, error) {
+
+	var filesCopied []string
+
+	// create destination directory
+	err := sftpClient.MkdirAll(s.DstDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// copy files
+	files, err := os.ReadDir(s.SrcDir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+
+		srcFile, err := os.Open(fmt.Sprintf("%s/%s", s.SrcDir, file.Name()))
+		if err != nil {
+			return nil, err
+		}
+		dstFile, err := sftpClient.Create(fmt.Sprintf("%s/%s", s.DstDir, file.Name()))
+		if err != nil {
+			_ = srcFile.Close()
+			return nil, err
+		}
+		err = copyFile(srcFile, dstFile)
+		_ = srcFile.Close()
+		_ = dstFile.Close()
+		if err != nil {
+			return nil, err
+		}
+
+		filesCopied = append(filesCopied, fmt.Sprintf("%s/%s", s.DstDir, file.Name()))
+	}
+
+	return filesCopied, nil
+}
+
+func (s *SFTPRunnable) copyRemoteToLocal(sftpClient *sftp.Client) ([]string, error) {
+
+	var filesCopied []string
+
+	// create destination directory
+	err := os.MkdirAll(s.DstDir, 0755)
+	if err != nil {
+		return nil, err
+	}
+
+	// copy files
+	files, err := sftpClient.ReadDir(s.SrcDir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+
+		srcFile, err := sftpClient.Open(fmt.Sprintf("%s/%s", s.SrcDir, file.Name()))
+		if err != nil {
+			return nil, err
+		}
+		dstFile, err := os.Create(fmt.Sprintf("%s/%s", s.DstDir, file.Name()))
+		if err != nil {
+			_ = srcFile.Close()
+			return nil, err
+		}
+		err = copyFile(srcFile, dstFile)
+		_ = srcFile.Close()
+		_ = dstFile.Close()
+		if err != nil {
+			return nil, err
+		}
+
+		filesCopied = append(filesCopied, fmt.Sprintf("%s/%s", s.DstDir, file.Name()))
+	}
+
+	return filesCopied, nil
+}
diff --git a/runnable-sftp_test.go b/runnable-sftp_test.go
new file mode 100644
index 0000000..4ab86c7
--- /dev/null
+++ b/runnable-sftp_test.go
@@ -0,0 +1,296 @@
+package jobqueue
+
+import (
+	"context"
+	"fmt"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/nat"
+	"github.com/stretchr/testify/assert"
+	"net"
+	"os"
+	"testing"
+	"time"
+)
+
+func startSFTPTestDockerImageAndContainer(t *testing.T, host string, port string, volume string, ctx context.Context) error {
+	t.Helper()
+
+	cli, err := client.NewClientWithOpts(client.WithVersion("1.41"))
+	if err != nil {
+		return err
+	}
+
+	imageName := "atmoz/sftp:alpine"
+
+	reader, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
+	if err != nil {
+		return err
+	}
+
+	// if debug image pull, comment out the following lines
+	//_, _ = io.Copy(os.Stdout, reader)
+	_ = reader
+
+	hostConfig := &container.HostConfig{
+		PortBindings: nat.PortMap{
+			"22/tcp": []nat.PortBinding{
+				{
+					HostIP:   host,
+					HostPort: port,
+				},
+			},
+		},
+	}
+
+	if volume != "" {
+		hostConfig.Binds = append(hostConfig.Binds, volume+":/home/demo/upload")
+	}
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image: imageName,
+		Cmd:   []string{"demo:secret:::upload"},
+	}, hostConfig, nil, nil, "")
+
+	if err != nil {
+		return err
+	}
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		return err
+	}
+
+	go func() {
+		<-ctx.Done()
+
+		timeout := 0
+		stopOptions := container.StopOptions{
+			Timeout: &timeout,
+			Signal:  "SIGKILL",
+		}
+		newCtx, _ := context.WithTimeout(context.Background(), 20*time.Second)
+		if err := cli.ContainerStop(newCtx, resp.ID, stopOptions); err != nil {
+			t.Errorf("ContainerStop returned error: %v", err)
+		}
+		if err := cli.ContainerRemove(newCtx, resp.ID, types.ContainerRemoveOptions{
+			Force: true,
+		}); err != nil {
+			t.Errorf("ContainerRemove returned error: %v", err)
+		}
+
+	}()
+
+	statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
+	select {
+	case err := <-errCh:
+		if err != nil {
+			// empty error means container exited normally (see container_wait.go)
+			if err.Error() == "" {
+				return nil
+			}
+
+			return err
+		}
+	case <-statusCh:
+
+	}
+
+	return nil
+}
+
+func TestSFTPCRunnerLocalToRemote(t *testing.T) {
+	ctb := context.Background()
+	ctx, cancel := context.WithCancel(ctb)
+	t.Cleanup(func() {
+		cancel()
+		time.Sleep(1 * time.Second)
+	})
+
+	host := "127.0.0.1"
+
+	listener, err := net.Listen("tcp", host+":0")
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+		return
+	}
+	portAsInt := listener.Addr().(*net.TCPAddr).Port
+	portAsString := fmt.Sprintf("%d", portAsInt)
+	_ = listener.Close()
+
+	done := make(chan bool)
+	go func() {
+		err = startSFTPTestDockerImageAndContainer(t, host, portAsString, "", ctx)
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+			cancel()
+		}
+		done <- true
+	}()
+
+	waitCtx, waitCancel := context.WithTimeout(ctx, 30*time.Second)
+	defer waitCancel()
+	for {
+		conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, portAsString), 1*time.Second)
+		if err == nil {
+			err = conn.Close()
+			assert.Nil(t, err)
+			break
+		}
+		select {
+		case <-waitCtx.Done():
+			t.Error("Timeout waiting for container service")
+			cancel()
+			return
+		default:
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	time.Sleep(1 * time.Second)
+
+	tempDir := t.TempDir()
+	// create 4 test files
+	for i := 0; i < 4; i++ {
+		_, err := os.Create(fmt.Sprintf("%s/testfile%d.txt", tempDir, i))
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+			return
+		}
+	}
+
+	sftpRunnable := &SFTPRunnable{
+		Host:           host,
+		Port:           portAsInt,
+		User:           "demo",
+		Insecure:       true,
+		Credential:     "secret",
+		CredentialType: "password",
+
+		SrcDir:            tempDir,
+		DstDir:            "upload",
+		TransferDirection: LocalToRemote,
+	}
+
+	result, err := sftpRunnable.Run()
+
+	// Assertions
+	assert.NoError(t, err)
+	assert.Equal(t, ResultStatusSuccess, result.Status)
+	assert.IsType(t, SFTPResult{}, result.Data)
+
+	// check result.Data contains 4 files
+	sftpResult := result.Data.FilesCopied
+	assert.Equal(t, 4, len(sftpResult))
+
+	cancel()
+
+	select {
+	case <-done:
+		time.Sleep(1 * time.Second)
+	case <-time.After(1 * time.Minute):
+		t.Error("test hangs, timeout reached")
+	}
+
+}
+
+func TestSFTPCRunnerRemoteToLocal(t *testing.T) {
+	ctb := context.Background()
+	ctx, cancel := context.WithCancel(ctb)
+	t.Cleanup(func() {
+		cancel()
+		time.Sleep(1 * time.Second)
+	})
+
+	host := "127.0.0.1"
+
+	listener, err := net.Listen("tcp", host+":0")
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+		return
+	}
+	portAsInt := listener.Addr().(*net.TCPAddr).Port
+	portAsString := fmt.Sprintf("%d", portAsInt)
+	_ = listener.Close()
+
+	tempSrcDir := t.TempDir()
+	// create 4 test files
+	for i := 0; i < 4; i++ {
+		_, err := os.Create(fmt.Sprintf("%s/testfile%d.txt", tempSrcDir, i))
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+			return
+		}
+	}
+
+	done := make(chan bool)
+	go func() {
+		err = startSFTPTestDockerImageAndContainer(t, host, portAsString, tempSrcDir, ctx)
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+			cancel()
+		}
+		done <- true
+	}()
+
+	waitCtx, waitCancel := context.WithTimeout(ctx, 30*time.Second)
+	defer waitCancel()
+	for {
+		conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, portAsString), 1*time.Second)
+		if err == nil {
+			err = conn.Close()
+			assert.Nil(t, err)
+			break
+		}
+		select {
+		case <-waitCtx.Done():
+			t.Error("Timeout waiting for container service")
+			cancel()
+			return
+		default:
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	time.Sleep(1 * time.Second)
+
+	tempDir := t.TempDir()
+
+	sftpRunnable := &SFTPRunnable{
+		Host:              host,
+		Port:              portAsInt,
+		User:              "demo",
+		Insecure:          true,
+		Credential:        "secret",
+		CredentialType:    "password",
+		SrcDir:            "upload", // Remote-Verzeichnis mit Dateien
+		DstDir:            tempDir,
+		TransferDirection: RemoteToLocal,
+	}
+
+	// Methode aufrufen
+	result, err := sftpRunnable.Run()
+
+	// Assertions
+	assert.NoError(t, err)
+	assert.Equal(t, ResultStatusSuccess, result.Status)
+	assert.IsType(t, SFTPResult{}, result.Data)
+
+	// check result.Data contains 4 files
+	sftpResult := result.Data.FilesCopied
+	assert.Equal(t, 4, len(sftpResult))
+
+	// check files in tempDir
+	files, err := os.ReadDir(tempDir)
+	assert.NoError(t, err)
+	assert.Equal(t, 4, len(files))
+
+	cancel()
+
+	select {
+	case <-done:
+		time.Sleep(1 * time.Second)
+	case <-time.After(1 * time.Minute):
+		t.Error("test hangs, timeout reached")
+	}
+}
diff --git a/runnable-shell.go b/runnable-shell.go
new file mode 100644
index 0000000..af60f25
--- /dev/null
+++ b/runnable-shell.go
@@ -0,0 +1,52 @@
+package jobqueue
+
+import (
+	"os/exec"
+	"strings"
+)
+
// ShellResult is a result of a shell script
type ShellResult struct {
	Output   string // captured stdout (whitespace-trimmed on success)
	Error    string // captured stderr, when available
	ExitCode int    // process exit code; 0 on success
}

// ShellRunnable executes the shell script at ScriptPath using "sh".
type ShellRunnable struct {
	ScriptPath string // path to the script passed to "sh"
}
+
+func (s *ShellRunnable) Run() (RunResult[ShellResult], error) {
+	cmd := exec.Command("sh", s.ScriptPath)
+	output, err := cmd.Output()
+
+	var stderr []byte
+	if err != nil {
+		stderr = err.(*exec.ExitError).Stderr
+	}
+
+	exitCode := 0
+
+	if err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			exitCode = exitError.ExitCode()
+		}
+		return RunResult[ShellResult]{
+			Status: ResultStatusFailed,
+			Data: ShellResult{
+				Output:   string(output),
+				ExitCode: exitCode,
+				Error:    string(stderr),
+			},
+		}, err
+	}
+
+	return RunResult[ShellResult]{
+		Status: ResultStatusSuccess,
+		Data: ShellResult{
+			Output:   strings.TrimSpace(string(output)),
+			ExitCode: exitCode,
+			Error:    string(stderr),
+		},
+	}, nil
+}
diff --git a/runnable-shell_test.go b/runnable-shell_test.go
new file mode 100644
index 0000000..eab6496
--- /dev/null
+++ b/runnable-shell_test.go
@@ -0,0 +1,46 @@
+package jobqueue
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
// TestShellRunnable_Run writes a small shell script into a temp directory,
// executes it via ShellRunnable and checks output, status and exit code.
func TestShellRunnable_Run(t *testing.T) {
	// Create a temporary shell script file.
	tmpDir := t.TempDir()
	tmpFile := "example.sh"
	tmpPath := path.Join(tmpDir, tmpFile)

	fp, err := os.Create(tmpPath)
	if err != nil {
		t.Fatal(err)
	}
	defer fp.Close()

	content := []byte("#!/bin/sh\necho 'Hello, world!'\n")
	if _, err := fp.Write(content); err != nil {
		t.Fatal(err)
	}

	// Initialize the ShellRunnable with the temp file's path.
	shellRunnable := ShellRunnable{ScriptPath: fp.Name()}

	// Invoke the Run method.
	result, err := shellRunnable.Run()

	// Verify: no error, success status, trimmed stdout, zero exit code.
	if err != nil {
		t.Errorf("Run() failed with error: %v", err)
	}
	if result.Status != ResultStatusSuccess {
		t.Errorf("Expected status Success, got %v", result.Status)
	}

	if result.Data.Output != "Hello, world!" {
		t.Errorf("Expected output 'Hello, world!', got '%v'", result.Data.Output)
	}
	if result.Data.ExitCode != 0 {
		t.Errorf("Expected exit code 0, got %v", result.Data.ExitCode)
	}
}
diff --git a/runnable.go b/runnable.go
index 0e99d16..99f66a1 100644
--- a/runnable.go
+++ b/runnable.go
@@ -1,88 +1,25 @@
 package jobqueue
 
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"os/exec"
-)
-
-type Runnable interface {
-	Run(ctx context.Context) (int, any, error)
-}
+type ResultStatus int
 
-type GoFunctionRunner struct {
-	Func func() (int, any, error)
-}
+const (
+	ResultStatusSuccess ResultStatus = iota
+	ResultStatusFailed
+)
 
-type Result struct {
-	Code   int
-	Result any
-	Err    error
+type RunGenericResult interface {
+	GetStatus() ResultStatus
 }
 
-func (g *GoFunctionRunner) Run(ctx context.Context) (int, any, error) {
-	done := make(chan Result)
-
-	go func() {
-		var res Result
-		defer func() {
-			if r := recover(); r != nil {
-				res.Err = fmt.Errorf("Command panicked: %w", fmt.Errorf("%v", r))
-			}
-			done <- res
-		}()
-		res.Code, res.Result, res.Err = g.Func()
-	}()
-
-	select {
-	case res := <-done:
-		return res.Code, res.Result, res.Err
-	case <-ctx.Done():
-		return RunnableTerminatedExitCode, nil, ctx.Err()
-	}
+type RunResult[T any] struct {
+	Status ResultStatus
+	Data   T
 }
 
-type ExternalProcessRunner struct {
-	Command string
-	Args    []string
+func (r RunResult[T]) GetStatus() ResultStatus {
+	return r.Status
 }
 
-func (e *ExternalProcessRunner) Run(ctx context.Context) (int, any, error) {
-	var stdout, stderr bytes.Buffer
-	cmd := exec.CommandContext(ctx, e.Command, e.Args...)
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-
-	done := make(chan Result)
-
-	go func() {
-		var res Result
-		defer func() {
-			if r := recover(); r != nil {
-				res.Err = fmt.Errorf("Command panicked: %w", fmt.Errorf("%v", r))
-			}
-			done <- res
-		}()
-		err := cmd.Run()
-		res.Result = stdout.String() + stderr.String()
-		if err != nil {
-			if exitErr, ok := err.(*exec.ExitError); ok {
-				res.Code = exitErr.ExitCode()
-			}
-			res.Err = err
-		} else {
-			res.Code = cmd.ProcessState.ExitCode()
-		}
-	}()
-
-	select {
-	case res := <-done:
-		return res.Code, res.Result, res.Err
-	case <-ctx.Done():
-		if cmd.Process != nil {
-			_ = cmd.Process.Kill()
-		}
-		return RunnableTerminatedExitCode, nil, ctx.Err()
-	}
+type Runnable[T any] interface {
+	Run() (RunResult[T], error)
 }
diff --git a/runnable_test.go b/runnable_test.go
index 168e5c0..437ef15 100644
--- a/runnable_test.go
+++ b/runnable_test.go
@@ -1,131 +1,52 @@
 package jobqueue
 
 import (
-	"context"
+	"errors"
 	"testing"
-	"time"
 )
 
-func TestGoFunctionRunner(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
+// MockSuccessfulRunnable gibt immer ResultStatusSuccess zurück
+type MockSuccessfulRunnable struct{}
 
-	runner := &GoFunctionRunner{
-		Func: func() (int, any, error) {
-			return 42, nil, nil
-		},
-	}
-
-	result, _, err := runner.Run(ctx)
-	if err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-	if result != 42 {
-		t.Errorf("Expected 42, got %v", result)
-	}
+func (m MockSuccessfulRunnable) Run() (RunResult[string], error) {
+	return RunResult[string]{Status: ResultStatusSuccess, Data: "Success"}, nil
 }
 
-func TestGoFunctionRunnerTimeout(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
-	defer cancel()
-
-	runner := &GoFunctionRunner{
-		Func: func() (int, any, error) {
-			time.Sleep(10 * time.Millisecond)
-			return 42, nil, nil
-		},
-	}
+// MockFailedRunnable gibt immer ResultStatusFailed zurück
+type MockFailedRunnable struct{}
 
-	_, _, err := runner.Run(ctx)
-	if err == nil {
-		t.Errorf("Expected timeout error, got nil")
-	}
+func (m MockFailedRunnable) Run() (RunResult[string], error) {
+	return RunResult[string]{Status: ResultStatusFailed, Data: "Failed"}, nil
 }
 
-func TestExternalProcessRunner(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Hour)
-	defer cancel()
-
-	runner := &ExternalProcessRunner{
-		Command: "echo",
-		Args:    []string{"hello"},
-	}
+// MockErrorRunnable gibt immer einen Fehler zurück
+type MockErrorRunnable struct{}
 
-	_, _, err := runner.Run(ctx)
-	if err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-}
-
-func TestExternalProcessRunnerFail(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
-
-	runner := &ExternalProcessRunner{
-		Command: "nonexistentcommand",
-	}
-
-	_, _, err := runner.Run(ctx)
-	if err == nil {
-		t.Errorf("Expected error, got nil")
-	}
+func (m MockErrorRunnable) Run() (RunResult[string], error) {
+	return RunResult[string]{}, errors.New("RunError")
 }
 
-func TestGoFunctionRunnerNilFunc(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
+func TestRunnable(t *testing.T) {
+	var run Runnable[string]
 
-	runner := &GoFunctionRunner{}
-
-	_, _, err := runner.Run(ctx)
-	if err == nil {
-		t.Errorf("Expected error, got nil")
+	// Test für erfolgreiche Ausführung
+	run = MockSuccessfulRunnable{}
+	result, err := run.Run()
+	if result.Status != ResultStatusSuccess || err != nil {
+		t.Errorf("Expected success, got %v, %v", result.Status, err)
 	}
-}
-
-func TestGoFunctionRunnerPanic(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
 
-	runner := &GoFunctionRunner{
-		Func: func() (int, any, error) {
-			panic("Test panic")
-		},
+	// Test für fehlgeschlagene Ausführung
+	run = MockFailedRunnable{}
+	result, err = run.Run()
+	if result.Status != ResultStatusFailed || err != nil {
+		t.Errorf("Expected failure, got %v, %v", result.Status, err)
 	}
 
-	_, _, err := runner.Run(ctx)
+	// Test für Ausführungsfehler
+	run = MockErrorRunnable{}
+	result, err = run.Run()
 	if err == nil {
 		t.Errorf("Expected error, got nil")
 	}
 }
-
-func TestGoFunctionRunnerExpiredContext(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
-	defer cancel()
-	time.Sleep(2 * time.Nanosecond)
-
-	runner := &GoFunctionRunner{
-		Func: func() (int, any, error) {
-			return 42, nil, nil
-		},
-	}
-
-	_, _, err := runner.Run(ctx)
-	if err == nil {
-		t.Errorf("Expected context expired error, got nil")
-	}
-}
-
-func TestExternalProcessRunnerInvalidCommand(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
-
-	runner := &ExternalProcessRunner{
-		Command: "",
-	}
-
-	_, _, err := runner.Run(ctx)
-	if err == nil {
-		t.Errorf("Expected error for invalid command, got nil")
-	}
-}
diff --git a/scheduler.go b/scheduler.go
new file mode 100644
index 0000000..b94b699
--- /dev/null
+++ b/scheduler.go
@@ -0,0 +1,344 @@
+package jobqueue
+
+import (
+	"fmt"
+	"github.com/robfig/cron/v3"
+	"time"
+)
+
+// StopChan signals a scheduling goroutine to shut down.
+type StopChan chan bool
+
+// Scheduler schedules jobs onto the event bus and manages their lifecycle.
+// Implementations publish scheduled jobs on the QueueJob topic.
+type Scheduler interface {
+	Schedule(job GenericJob, eventBus *EventBus) error
+	Cancel(id JobID) error
+	CancelAll() error
+	JobExists(id JobID) bool
+
+	GetType() string
+}
+
+// IntervalScheduler is a scheduler that schedules a job at a fixed interval
+type IntervalScheduler struct {
+	Interval time.Duration
+	jobs     map[JobID]StopChan // per-job stop channels for the ticker goroutines
+}
+
+// Schedule starts a ticker with the configured Interval and publishes the
+// job on the QueueJob topic at every tick until Cancel or CancelAll stops
+// it. Returns an error for a non-positive interval or an already-scheduled
+// job ID.
+//
+// NOTE(review): s.jobs is read and written without a mutex — concurrent
+// Schedule/Cancel calls would race; confirm callers serialize access.
+func (s *IntervalScheduler) Schedule(job GenericJob, eventBus *EventBus) error {
+
+	if s.Interval <= 0 {
+		return fmt.Errorf("invalid interval: %v", s.Interval)
+	}
+
+	// Lazily initialize the bookkeeping map on first use.
+	if s.jobs == nil {
+		s.jobs = make(map[JobID]StopChan)
+	}
+
+	id := job.GetID()
+	if _, ok := s.jobs[id]; ok {
+		return fmt.Errorf("job %s already scheduled", id)
+	}
+
+	stopChan := make(StopChan)
+	s.jobs[id] = stopChan
+
+	ticker := time.NewTicker(s.Interval)
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				// Each tick re-enqueues the job for execution.
+				eventBus.Publish(QueueJob, job)
+			case <-stopChan:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+// GetType returns the scheduler type name ("Interval").
+func (s *IntervalScheduler) GetType() string {
+	return "Interval"
+}
+
+// Cancel stops the ticker goroutine for the given job ID and removes it
+// from the bookkeeping map. Cancelling an unknown ID is a no-op.
+func (s *IntervalScheduler) Cancel(id JobID) error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	if stopChan, ok := s.jobs[id]; ok {
+		// The ticker goroutine only exits via stopChan, so it is still
+		// alive and this unbuffered send will rendezvous with it.
+		stopChan <- true
+		delete(s.jobs, id)
+	}
+
+	return nil
+}
+
+// CancelAll stops every ticker goroutine and clears the bookkeeping map.
+func (s *IntervalScheduler) CancelAll() error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	for _, stopChan := range s.jobs {
+		stopChan <- true
+	}
+
+	s.jobs = nil
+	return nil
+}
+
+// JobExists reports whether the given job ID is currently scheduled.
+func (s *IntervalScheduler) JobExists(id JobID) bool {
+	if s.jobs == nil {
+		return false
+	}
+
+	_, ok := s.jobs[id]
+	return ok
+}
+
+// CronScheduler is a scheduler that uses the cron library to schedule jobs
+type CronScheduler struct {
+	cron *cron.Cron
+	Spec string
+	jobs map[JobID]cron.EntryID // cron entry per scheduled job
+}
+
+// Schedule registers the job with the underlying cron instance using the
+// configured Spec and starts the cron runner; on every firing the job is
+// published on the QueueJob topic. Returns ErrCronNotInitialized when no
+// cron instance was provided, and an error for a duplicate job ID or an
+// invalid spec.
+func (s *CronScheduler) Schedule(job GenericJob, eventBus *EventBus) error {
+	if s.cron == nil {
+		return ErrCronNotInitialized
+	}
+
+	if s.jobs == nil {
+		s.jobs = make(map[JobID]cron.EntryID)
+	}
+
+	id := job.GetID()
+	if _, ok := s.jobs[id]; ok {
+		return fmt.Errorf("job %s already scheduled", id)
+	}
+
+	entryId, err := s.cron.AddFunc(s.Spec, func() {
+		eventBus.Publish(QueueJob, job)
+	})
+	// Check AddFunc's error BEFORE recording the entry; the previous code
+	// stored the entry ID unconditionally, leaving a bogus entry in the
+	// map when the spec failed to parse.
+	if err != nil {
+		return err
+	}
+	s.jobs[id] = entryId
+
+	s.cron.Start()
+	return nil
+}
+
+// GetType returns the scheduler type name ("Cron").
+func (s *CronScheduler) GetType() string {
+	return "Cron"
+}
+
+// Cancel removes the cron entry for the given job ID. Cancelling an
+// unknown ID is a no-op.
+func (s *CronScheduler) Cancel(id JobID) error {
+
+	if s.jobs == nil {
+		return nil
+	}
+
+	if entryId, ok := s.jobs[id]; ok {
+		s.cron.Remove(entryId)
+		// Also drop the bookkeeping entry; the previous code left it in
+		// place, so JobExists kept reporting true and the job could
+		// never be scheduled again.
+		delete(s.jobs, id)
+	}
+
+	return nil
+}
+
+// CancelAll removes every scheduled cron entry and clears the map.
+func (s *CronScheduler) CancelAll() error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	for _, entryId := range s.jobs {
+		s.cron.Remove(entryId)
+	}
+
+	s.jobs = nil
+	return nil
+}
+
+// JobExists reports whether the given job ID is currently scheduled.
+func (s *CronScheduler) JobExists(id JobID) bool {
+	if s.jobs == nil {
+		return false
+	}
+
+	_, ok := s.jobs[id]
+	return ok
+}
+
+// DelayScheduler is a scheduler that schedules a job after a delay
+type DelayScheduler struct {
+	Delay time.Duration
+	jobs  map[JobID]StopChan // per-job stop channels for pending timers
+}
+
+// Schedule publishes the job on the QueueJob topic exactly once after the
+// configured Delay, unless it is cancelled first. Returns an error for an
+// already-scheduled job ID.
+func (s *DelayScheduler) Schedule(job GenericJob, eventBus *EventBus) error {
+	if s.jobs == nil {
+		s.jobs = make(map[JobID]StopChan)
+	}
+
+	id := job.GetID()
+	if _, ok := s.jobs[id]; ok {
+		return fmt.Errorf("job %s already scheduled", id)
+	}
+
+	// Create the timer only after validation; the previous code started
+	// it before the duplicate check and leaked a running timer on the
+	// error path.
+	timer := time.NewTimer(s.Delay)
+
+	stopChan := make(StopChan)
+	s.jobs[id] = stopChan
+
+	go func() {
+		select {
+		case <-timer.C:
+			eventBus.Publish(QueueJob, job)
+		case <-stopChan:
+			timer.Stop()
+		}
+	}()
+	return nil
+}
+
+// GetType returns the scheduler type name ("Delay").
+func (s *DelayScheduler) GetType() string {
+	return "Delay"
+}
+
+// Cancel stops the pending delayed job with the given ID. Cancelling an
+// unknown ID is a no-op.
+func (s *DelayScheduler) Cancel(id JobID) error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	if stopChan, ok := s.jobs[id]; ok {
+		// Close instead of send: the delivery goroutine exits as soon
+		// as the timer fires, so the previous unbuffered send
+		// deadlocked when Cancel ran after the job had already been
+		// published. A close never blocks.
+		close(stopChan)
+		delete(s.jobs, id)
+	}
+
+	return nil
+}
+
+// CancelAll stops every pending delayed job and clears the map.
+func (s *DelayScheduler) CancelAll() error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	for _, stopChan := range s.jobs {
+		// See Cancel: close avoids blocking on jobs that already fired.
+		close(stopChan)
+	}
+
+	s.jobs = nil
+	return nil
+}
+
+// JobExists reports whether the given job ID is still pending.
+func (s *DelayScheduler) JobExists(id JobID) bool {
+	if s.jobs == nil {
+		return false
+	}
+
+	_, ok := s.jobs[id]
+	return ok
+}
+
+// EventScheduler is a scheduler that schedules a job when an event is received
+type EventScheduler struct {
+	Event EventName
+	jobs  map[JobID]StopChan // per-job stop channels for the listener goroutines
+}
+
+// Schedule subscribes to the configured Event on the bus and re-publishes
+// the job on the QueueJob topic every time that event fires, until the job
+// is cancelled. Returns an error for an already-scheduled job ID.
+func (s *EventScheduler) Schedule(job GenericJob, eventBus *EventBus) error {
+	if s.jobs == nil {
+		s.jobs = make(map[JobID]StopChan)
+	}
+
+	id := job.GetID()
+	if _, ok := s.jobs[id]; ok {
+		return fmt.Errorf("job %s already scheduled", id)
+	}
+
+	// Subscribe only after the duplicate check; the previous code
+	// subscribed first and leaked a subscription with no reader on the
+	// error path.
+	ch := make(chan interface{})
+	eventBus.Subscribe(s.Event, ch)
+
+	stopChan := make(StopChan)
+	s.jobs[id] = stopChan
+
+	go func() {
+		for {
+			select {
+			case <-ch:
+				eventBus.Publish(QueueJob, job)
+			case <-stopChan:
+				eventBus.Unsubscribe(s.Event, ch)
+				return
+			}
+		}
+	}()
+	return nil
+}
+
+// GetType returns the scheduler type name ("Event").
+func (s *EventScheduler) GetType() string {
+	return "Event"
+}
+
+// Cancel stops listening for the given job ID and removes it from the
+// bookkeeping map. Cancelling an unknown ID is a no-op.
+//
+// NOTE(review): the send blocks until the listener goroutine returns to
+// its select — if eventBus.Publish can block, Cancel blocks with it.
+func (s *EventScheduler) Cancel(id JobID) error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	if stopChan, ok := s.jobs[id]; ok {
+		stopChan <- true
+		delete(s.jobs, id)
+	}
+
+	return nil
+
+}
+
+// CancelAll stops every listener goroutine and clears the bookkeeping map.
+func (s *EventScheduler) CancelAll() error {
+	if s.jobs == nil {
+		return nil
+	}
+
+	for _, stopChan := range s.jobs {
+		stopChan <- true
+	}
+
+	s.jobs = nil
+	return nil
+}
+
+// JobExists reports whether the given job ID is currently scheduled.
+func (s *EventScheduler) JobExists(id JobID) bool {
+	if s.jobs == nil {
+		return false
+	}
+
+	_, ok := s.jobs[id]
+	return ok
+}
+
+// InstantScheduler is a scheduler that schedules a job instantly
+type InstantScheduler struct{}
+
+// Schedule publishes the job on the QueueJob topic exactly once,
+// immediately; nothing is tracked afterwards.
+func (s *InstantScheduler) Schedule(job GenericJob, eventBus *EventBus) error {
+	eventBus.Publish(QueueJob, job)
+	return nil
+}
+
+// GetType returns the scheduler type name ("Instant").
+func (s *InstantScheduler) GetType() string {
+	return "Instant"
+}
+
+// Cancel is a no-op: an instant job is published at schedule time and
+// cannot be cancelled afterwards.
+func (s *InstantScheduler) Cancel(id JobID) error {
+	return nil
+}
+
+// CancelAll is a no-op for the same reason as Cancel.
+func (s *InstantScheduler) CancelAll() error {
+	return nil
+}
+
+// JobExists always reports false: instant jobs are never tracked.
+func (s *InstantScheduler) JobExists(id JobID) bool {
+	return false
+}
diff --git a/scheduler_test.go b/scheduler_test.go
new file mode 100644
index 0000000..9b6da3f
--- /dev/null
+++ b/scheduler_test.go
@@ -0,0 +1,257 @@
+package jobqueue
+
+import (
+	"github.com/robfig/cron/v3"
+	"github.com/stretchr/testify/assert"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+// TestIntervalScheduler_BasicFunctionality verifies that an interval of
+// 100ms publishes the job at least 4 times within 500ms.
+func TestIntervalScheduler_BasicFunctionality(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	scheduler := IntervalScheduler{Interval: time.Millisecond * 100}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Start the consumer and subscribe BEFORE scheduling so no tick can
+	// fire while nobody is listening (the original subscribed after
+	// Schedule, racing the first tick). Also use the idiomatic
+	// "for range" instead of "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	genericJob := GenericJob(job)
+	_ = scheduler.Schedule(genericJob, eventBus)
+
+	time.Sleep(time.Millisecond * 500)
+	if atomic.LoadInt32(&count) < 4 {
+		t.Errorf("Expected to run at least 4 times, ran %d times", count)
+	}
+}
+
+// TestIntervalScheduler_StopTicker verifies that Cancel stops further
+// ticks: exactly one tick (at 100ms) is expected before the 150ms cancel.
+func TestIntervalScheduler_StopTicker(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	scheduler := IntervalScheduler{Interval: time.Millisecond * 100}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Consume and subscribe before scheduling to avoid racing the first
+	// tick; "for range" replaces the non-idiomatic "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	err := scheduler.Schedule(GenericJob(job), eventBus)
+	assert.Nil(t, err)
+
+	time.Sleep(time.Millisecond * 150)
+	_ = scheduler.Cancel(job.GetID())
+	time.Sleep(time.Millisecond * 100)
+
+	if atomic.LoadInt32(&count) != 1 {
+		t.Errorf("Expected to run 1 time, ran %d times", count)
+	}
+}
+
+// TestIntervalScheduler_InvalidInterval verifies that a zero interval is
+// rejected by Schedule.
+func TestIntervalScheduler_InvalidInterval(t *testing.T) {
+	bus := NewEventBus()
+	badScheduler := IntervalScheduler{Interval: 0}
+
+	j := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	if err := badScheduler.Schedule(GenericJob(j), bus); err == nil {
+		t.Errorf("Expected an error due to invalid interval")
+	}
+}
+
+// TestCronScheduler_BasicFunctionality verifies that a once-per-second
+// cron spec fires at least twice within three seconds.
+func TestCronScheduler_BasicFunctionality(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Consume and subscribe before scheduling so the first firing cannot
+	// be missed; "for range" replaces "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	cronScheduler := CronScheduler{cron: cron.New(cron.WithSeconds()), Spec: "*/1 * * * * *"}
+	_ = cronScheduler.Schedule(GenericJob(job), eventBus)
+
+	time.Sleep(time.Second * 3)
+	if atomic.LoadInt32(&count) < 2 {
+		t.Errorf("Expected to run at least 2 times, ran %d times", count)
+	}
+}
+
+// TestCronScheduler_StopScheduler verifies that firings observed before
+// Cancel are counted and that Cancel succeeds mid-run.
+func TestCronScheduler_StopScheduler(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+
+	c := cron.New(cron.WithSeconds())
+
+	cronScheduler := CronScheduler{cron: c, Spec: "*/1 * * * * *"}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Consume and subscribe before scheduling so early firings are not
+	// lost; "for range" replaces "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	_ = cronScheduler.Schedule(GenericJob(job), eventBus)
+
+	time.Sleep(time.Second * 2)
+	_ = cronScheduler.Cancel(job.GetID())
+	time.Sleep(time.Second)
+
+	if atomic.LoadInt32(&count) < 1 {
+		t.Errorf("Expected to run at least 1 time, ran %d times", count)
+	}
+}
+
+// TestDelayScheduler_BasicFunctionality verifies that a 100ms delay
+// publishes the job exactly once within 200ms.
+func TestDelayScheduler_BasicFunctionality(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	delayScheduler := DelayScheduler{Delay: time.Millisecond * 100}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Consume and subscribe before scheduling so the single delayed
+	// publish cannot be missed; "for range" replaces "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	_ = delayScheduler.Schedule(GenericJob(job), eventBus)
+
+	time.Sleep(time.Millisecond * 200)
+
+	if atomic.LoadInt32(&count) != 1 {
+		t.Errorf("Expected to run 1 time, ran %d times", count)
+	}
+}
+
+// TestDelayScheduler_StopBeforeExecute verifies that cancelling at 50ms
+// prevents a 100ms-delayed job from ever being published.
+func TestDelayScheduler_StopBeforeExecute(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	delayScheduler := DelayScheduler{Delay: time.Millisecond * 100}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// Consume and subscribe before scheduling; "for range" replaces the
+	// non-idiomatic "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	_ = delayScheduler.Schedule(GenericJob(job), eventBus)
+
+	time.Sleep(time.Millisecond * 50)
+	_ = delayScheduler.Cancel(job.GetID())
+	time.Sleep(time.Millisecond * 100)
+
+	if atomic.LoadInt32(&count) != 0 {
+		t.Errorf("Expected to not run, ran %d times", count)
+	}
+}
+
+// TestInstantScheduler_BasicFunctionality verifies that Schedule publishes
+// the job exactly once, immediately.
+func TestInstantScheduler_BasicFunctionality(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	instantScheduler := InstantScheduler{}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	jobChannel := make(chan interface{})
+
+	// "for range" replaces the non-idiomatic "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	time.Sleep(time.Millisecond * 100)
+
+	_ = instantScheduler.Schedule(GenericJob(job), eventBus)
+
+	time.Sleep(time.Millisecond * 100)
+
+	if atomic.LoadInt32(&count) != 1 {
+		t.Errorf("Expected to run 1 time, ran %d times", count)
+	}
+}
+
+// TestEventScheduler_BasicFunctionality verifies that publishing the
+// trigger event causes exactly one QueueJob publication.
+func TestEventScheduler_BasicFunctionality(t *testing.T) {
+	var count int32
+	eventBus := NewEventBus()
+	eventScheduler := EventScheduler{Event: "trigger-event"}
+
+	job := NewJob[DummyResult]("test-job", &DummyRunnable{})
+
+	_ = eventScheduler.Schedule(GenericJob(job), eventBus)
+
+	jobChannel := make(chan interface{})
+
+	// Start the consumer before subscribing so a publish can never block
+	// on an unread channel; "for range" replaces "for _ = range".
+	go func() {
+		for range jobChannel {
+			atomic.AddInt32(&count, 1)
+		}
+	}()
+
+	eventBus.Subscribe(QueueJob, jobChannel)
+
+	// Trigger the event
+	eventBus.Publish("trigger-event", Event{Data: nil, Name: "trigger-event"})
+
+	time.Sleep(time.Millisecond * 50) // Allow some time for the event to propagate
+
+	if atomic.LoadInt32(&count) != 1 {
+		t.Errorf("Expected to run 1 time, ran %d times", count)
+	}
+}
diff --git a/topological-sort.go b/topological-sort.go
index 6961b7f..748b580 100644
--- a/topological-sort.go
+++ b/topological-sort.go
@@ -2,36 +2,36 @@ package jobqueue
 
 import "container/heap"
 
-// JobIDPriority is a type that holds a JobID and its Priority
-type JobIDPriority struct {
-	ID       JobIDType
-	Priority int
+// idPriority is a type that holds a JobID and a Priority
+type idPriority struct {
+	id       JobID
+	priority Priority
 }
 
-// JobIDPriorityQueue is a priority jobs for JobIDPriority
-type JobIDPriorityQueue []JobIDPriority
+// idPriorityQueue is a type that holds a slice of idPriority
+type idPriorityQueue []idPriority
 
 // Len implements heap.Interface.Len
-func (pq JobIDPriorityQueue) Len() int { return len(pq) }
+func (pq *idPriorityQueue) Len() int { return len(*pq) }
 
 // Less implements heap.Interface.Less
-func (pq JobIDPriorityQueue) Less(i, j int) bool {
-	return pq[i].Priority > pq[j].Priority
+func (pq *idPriorityQueue) Less(i, j int) bool {
+	return (*pq)[i].priority > (*pq)[j].priority
 }
 
 // Swap implements heap.Interface.Swap
-func (pq JobIDPriorityQueue) Swap(i, j int) {
-	pq[i], pq[j] = pq[j], pq[i]
+func (pq *idPriorityQueue) Swap(i, j int) {
+	(*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i]
 }
 
 // Push implements heap.Interface.Push
-func (pq *JobIDPriorityQueue) Push(x interface{}) {
-	item := x.(JobIDPriority)
+func (pq *idPriorityQueue) Push(x interface{}) {
+	item := x.(idPriority)
 	*pq = append(*pq, item)
 }
 
 // Pop implements heap.Interface.Pop
-func (pq *JobIDPriorityQueue) Pop() interface{} {
+func (pq *idPriorityQueue) Pop() interface{} {
 	old := *pq
 	n := len(old)
 	item := old[n-1]
@@ -39,55 +39,55 @@ func (pq *JobIDPriorityQueue) Pop() interface{} {
 	return item
 }
 
-// topologicalSortJobs returns a topologically sorted list of job IDs
-func topologicalSortJobs(jobs map[JobIDType]*job) ([]JobIDType, error) {
-	// Initialize in-degrees
-	inDegrees := make(map[JobIDType]int)
-	for id := range jobs {
-		inDegrees[id] = 0
+// topologicalSortJobs returns a slice of JobIDs in the order they should be executed
+// if there is a cycle, it returns ErrCycleDetected
+// if there is a missing dependency, it returns ErrMissingDependency
+func topologicalSortJobs(jobs []GenericJob) ([]JobID, error) {
+	inDegrees := make(map[JobID]int)
+	jobMap := make(map[JobID]GenericJob)
+	dependents := make(map[JobID][]JobID)
+
+	for _, job := range jobs {
+		jobID := job.GetID()
+		inDegrees[jobID] = 0
+		jobMap[jobID] = job
 	}
 
 	for _, job := range jobs {
-		for _, dependency := range job.Dependencies {
-			// check if dependency exists
-			if _, ok := jobs[dependency]; !ok {
+		jobID := job.GetID()
+		for _, depID := range job.GetDependencies() {
+			if _, ok := jobMap[depID]; !ok {
 				return nil, ErrMissingDependency
 			}
-
-			inDegrees[dependency]++
+			inDegrees[jobID]++
+			dependents[depID] = append(dependents[depID], jobID)
 		}
 	}
 
-	// Create a priority jobs
-	pq := make(JobIDPriorityQueue, 0)
+	pq := make(idPriorityQueue, 0)
 	heap.Init(&pq)
 
-	// Add jobs with zero in-degree to priority jobs
 	for id, inDegree := range inDegrees {
 		if inDegree == 0 {
-			heap.Push(&pq, JobIDPriority{ID: id, Priority: jobs[id].Priority})
+			heap.Push(&pq, idPriority{id: id, priority: jobMap[id].GetPriority()})
 		}
 	}
 
-	result := make([]JobIDType, 0)
+	result := make([]JobID, 0)
 
 	for len(pq) > 0 {
-
-		jobIDPriority := heap.Pop(&pq).(JobIDPriority)
-		jobID := jobIDPriority.ID
-
+		idPrio := heap.Pop(&pq).(idPriority)
+		jobID := idPrio.id
 		result = append(result, jobID)
 
-		for _, dependentJobID := range jobs[jobID].Dependencies {
-			inDegrees[dependentJobID]--
-			if inDegrees[dependentJobID] == 0 {
-				heap.Push(&pq, JobIDPriority{ID: dependentJobID, Priority: jobs[dependentJobID].Priority})
+		for _, dependent := range dependents[jobID] {
+			inDegrees[dependent]--
+			if inDegrees[dependent] == 0 {
+				heap.Push(&pq, idPriority{id: dependent, priority: jobMap[dependent].GetPriority()})
 			}
 		}
-
 	}
 
-	// Check for cycles
 	for _, inDegree := range inDegrees {
 		if inDegree > 0 {
 			return nil, ErrCycleDetected
diff --git a/topological-sort_test.go b/topological-sort_test.go
index 565ce52..430d515 100644
--- a/topological-sort_test.go
+++ b/topological-sort_test.go
@@ -8,19 +8,18 @@ import (
 func TestTopologicalSortJobs(t *testing.T) {
 	// Create a sample set of jobs with dependencies and priorities
 
-	job1 := &job{JobSpecification: JobSpecification{Id: "1", Priority: PriorityHigh}}
+	job1 := &Job[string]{id: "1", priority: PriorityDefault}
+	job2 := &Job[string]{id: "2", priority: PriorityHigh, dependencies: []JobID{"1"}}
+	job3 := &Job[string]{id: "3", priority: PriorityLow, dependencies: []JobID{"1"}}
+	job4 := &Job[string]{id: "4", priority: PriorityCritical, dependencies: []JobID{"3"}}
+	job5 := &Job[string]{id: "5", dependencies: []JobID{"2", "4"}}
 
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Priority: PriorityHigh, Dependencies: []JobIDType{"1"}}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3", Priority: PriorityLow, Dependencies: []JobIDType{"1"}}}
-	job4 := &job{JobSpecification: JobSpecification{Id: "4", Priority: PriorityCritical, Dependencies: []JobIDType{"3"}}}
-	job5 := &job{JobSpecification: JobSpecification{Id: "5", Dependencies: []JobIDType{"2", "4"}}}
-
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
-		"4": job4,
-		"5": job5,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
+		job4,
+		job5,
 	}
 
 	// Call the function to get the sorted job IDs
@@ -30,7 +29,7 @@ func TestTopologicalSortJobs(t *testing.T) {
 	}
 
 	// Define the expected order
-	expectedOrder := []JobIDType{"5", "4", "2", "3", "1"}
+	expectedOrder := []JobID{"1", "2", "3", "4", "5"}
 
 	// Check if the result matches the expected order
 	if !reflect.DeepEqual(sortedJobIDs, expectedOrder) {
@@ -41,19 +40,18 @@ func TestTopologicalSortJobs(t *testing.T) {
 func TestTopologicalSortJobs2(t *testing.T) {
 	// Create a sample set of jobs with dependencies and priorities
 
-	job1 := &job{JobSpecification: JobSpecification{Id: "1", Priority: PriorityHigh}}
-
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Priority: PriorityHigh, Dependencies: []JobIDType{"1"}}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3", Priority: PriorityLow, Dependencies: []JobIDType{"1"}}}
-	job4 := &job{JobSpecification: JobSpecification{Id: "4", Priority: PriorityCritical, Dependencies: []JobIDType{"3"}}}
-	job5 := &job{JobSpecification: JobSpecification{Id: "5", Dependencies: []JobIDType{"2", "4"}}}
+	job1 := &Job[string]{id: "1", priority: PriorityHigh}
+	job2 := &Job[string]{id: "2", priority: PriorityHigh, dependencies: []JobID{"1"}}
+	job3 := &Job[string]{id: "3", priority: PriorityLow, dependencies: []JobID{"1"}}
+	job4 := &Job[string]{id: "4", priority: PriorityCritical, dependencies: []JobID{"3"}}
+	job5 := &Job[string]{id: "5", dependencies: []JobID{"2", "4"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
-		"4": job4,
-		"5": job5,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
+		job4,
+		job5,
 	}
 
 	// Call the function to get the sorted job IDs
@@ -63,7 +61,7 @@ func TestTopologicalSortJobs2(t *testing.T) {
 	}
 
 	// Define the expected order
-	expectedOrder := []JobIDType{"5", "4", "2", "3", "1"}
+	expectedOrder := []JobID{"1", "2", "3", "4", "5"}
 
 	// Check if the result matches the expected order
 	if !reflect.DeepEqual(sortedJobIDs, expectedOrder) {
@@ -71,16 +69,16 @@ func TestTopologicalSortJobs2(t *testing.T) {
 	}
 }
 
-func TestTopologicalSortJobsNoDependencies(t *testing.T) {
+func TestTopologicalSortJobsNodePendencies(t *testing.T) {
 	// Create a sample set of jobs with no dependencies
-	job1 := &job{JobSpecification: JobSpecification{Id: "1"}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2"}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3"}}
+	job1 := &Job[string]{id: "1"}
+	job2 := &Job[string]{id: "2"}
+	job3 := &Job[string]{id: "3"}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
 	}
 
 	// Call the function to get the sorted job IDs
@@ -90,7 +88,7 @@ func TestTopologicalSortJobsNoDependencies(t *testing.T) {
 	}
 
 	// Define the expected order (in any order because they have no dependencies)
-	expectedOrder := []JobIDType{"1", "2", "3"}
+	expectedOrder := []JobID{"3", "2", "1"}
 
 	// Check if the result contains the same elements as the expected order
 	if len(sortedJobIDs) != len(expectedOrder) {
@@ -99,7 +97,7 @@ func TestTopologicalSortJobsNoDependencies(t *testing.T) {
 }
 
 func TestTopologicalSortJobs_EmptyMap(t *testing.T) {
-	jobs := map[JobIDType]*job{}
+	jobs := []GenericJob{}
 	sortedJobIDs, err := topologicalSortJobs(jobs)
 	if err != nil {
 		t.Errorf("Error in sorting jobs: %v", err)
@@ -111,14 +109,14 @@ func TestTopologicalSortJobs_EmptyMap(t *testing.T) {
 
 func TestTopologicalSortJobs_CycleDetected(t *testing.T) {
 	// Creating a cycle 1 -> 2 -> 3 -> 1
-	job1 := &job{JobSpecification: JobSpecification{Id: "1", Dependencies: []JobIDType{"3"}}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Dependencies: []JobIDType{"1"}}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3", Dependencies: []JobIDType{"2"}}}
+	job1 := &Job[string]{id: "1", dependencies: []JobID{"3"}}
+	job2 := &Job[string]{id: "2", dependencies: []JobID{"1"}}
+	job3 := &Job[string]{id: "3", dependencies: []JobID{"2"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
 	}
 
 	_, err := topologicalSortJobs(jobs)
@@ -128,28 +126,28 @@ func TestTopologicalSortJobs_CycleDetected(t *testing.T) {
 }
 
 func TestTopologicalSortJobs_SingleNode(t *testing.T) {
-	job1 := &job{JobSpecification: JobSpecification{Id: "1"}}
+	job1 := &Job[string]{id: "1"}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
+	jobs := []GenericJob{
+		job1,
 	}
 
 	sortedJobIDs, err := topologicalSortJobs(jobs)
 	if err != nil {
 		t.Errorf("Error in sorting jobs: %v", err)
 	}
-	if !reflect.DeepEqual(sortedJobIDs, []JobIDType{"1"}) {
+	if !reflect.DeepEqual(sortedJobIDs, []JobID{"1"}) {
 		t.Errorf("Expected [\"1\"], got %v", sortedJobIDs)
 	}
 }
 
 func TestTopologicalSortJobs_MissingDependency(t *testing.T) {
-	job1 := &job{JobSpecification: JobSpecification{Id: "1"}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Dependencies: []JobIDType{"3"}}}
+	job1 := &Job[string]{id: "1"}
+	job2 := &Job[string]{id: "2", dependencies: []JobID{"3"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
+	jobs := []GenericJob{
+		job1,
+		job2,
 	}
 
 	_, err := topologicalSortJobs(jobs)
@@ -160,10 +158,10 @@ func TestTopologicalSortJobs_MissingDependency(t *testing.T) {
 
 func TestTopologicalSortJobs_SelfDependency(t *testing.T) {
 	// job 1 depends on itself
-	job1 := &job{JobSpecification: JobSpecification{Id: "1", Dependencies: []JobIDType{"1"}}}
+	job1 := &Job[string]{id: "1", dependencies: []JobID{"1"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
+	jobs := []GenericJob{
+		job1,
 	}
 
 	_, err := topologicalSortJobs(jobs)
@@ -174,56 +172,56 @@ func TestTopologicalSortJobs_SelfDependency(t *testing.T) {
 
 func TestTopologicalSortJobs_MultipleEdges(t *testing.T) {
 	// job 3 and job 4 both depend on job 2
-	job1 := &job{JobSpecification: JobSpecification{Id: "1"}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Dependencies: []JobIDType{"1"}}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3", Dependencies: []JobIDType{"2"}}}
-	job4 := &job{JobSpecification: JobSpecification{Id: "4", Dependencies: []JobIDType{"2"}}}
+	job1 := &Job[string]{id: "1"}
+	job2 := &Job[string]{id: "2", dependencies: []JobID{"1"}}
+	job3 := &Job[string]{id: "3", dependencies: []JobID{"2"}}
+	job4 := &Job[string]{id: "4", dependencies: []JobID{"2"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
-		"4": job4,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
+		job4,
 	}
 
 	sortedJobIDs, err := topologicalSortJobs(jobs)
 	if err != nil {
 		t.Errorf("Error in sorting jobs: %v", err)
 	}
-	if !reflect.DeepEqual(sortedJobIDs, []JobIDType{"4", "3", "2", "1"}) && !reflect.DeepEqual(sortedJobIDs, []JobIDType{"3", "4", "2", "1"}) {
+	if !reflect.DeepEqual(sortedJobIDs, []JobID{"1", "2", "3", "4"}) && !reflect.DeepEqual(sortedJobIDs, []JobID{"1", "2", "4", "3"}) {
 		t.Errorf("Unexpected order: %v", sortedJobIDs)
 	}
 }
 
-func TestTopologicalSortJobs_MultipleDependencies(t *testing.T) {
+func TestTopologicalSortJobs_Multipledependencies(t *testing.T) {
 	// job 3 depends on both job 1 and job 2
-	job1 := &job{JobSpecification: JobSpecification{Id: "1"}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2"}}
-	job3 := &job{JobSpecification: JobSpecification{Id: "3", Dependencies: []JobIDType{"1", "2"}}}
+	job1 := &Job[string]{id: "1"}
+	job2 := &Job[string]{id: "2"}
+	job3 := &Job[string]{id: "3", dependencies: []JobID{"1", "2"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
-		"3": job3,
+	jobs := []GenericJob{
+		job1,
+		job2,
+		job3,
 	}
 
 	sortedJobIDs, err := topologicalSortJobs(jobs)
 	if err != nil {
 		t.Errorf("Error in sorting jobs: %v", err)
 	}
-	if !reflect.DeepEqual(sortedJobIDs, []JobIDType{"3", "2", "1"}) && !reflect.DeepEqual(sortedJobIDs, []JobIDType{"3", "1", "2"}) {
+	if !reflect.DeepEqual(sortedJobIDs, []JobID{"1", "2", "3"}) && !reflect.DeepEqual(sortedJobIDs, []JobID{"2", "1", "3"}) {
 		t.Errorf("Unexpected order: %v", sortedJobIDs)
 	}
 }
 
 func TestTopologicalSortJobs_PriorityIgnoredInCycle(t *testing.T) {
 	// Cycle exists even if one job has high priority
-	job1 := &job{JobSpecification: JobSpecification{Id: "1", Priority: PriorityHigh, Dependencies: []JobIDType{"2"}}}
-	job2 := &job{JobSpecification: JobSpecification{Id: "2", Dependencies: []JobIDType{"1"}}}
+	job1 := &Job[string]{id: "1", priority: PriorityHigh, dependencies: []JobID{"2"}}
+	job2 := &Job[string]{id: "2", dependencies: []JobID{"1"}}
 
-	jobs := map[JobIDType]*job{
-		"1": job1,
-		"2": job2,
+	jobs := []GenericJob{
+		job1,
+		job2,
 	}
 
 	_, err := topologicalSortJobs(jobs)
@@ -231,3 +229,19 @@ func TestTopologicalSortJobs_PriorityIgnoredInCycle(t *testing.T) {
 		t.Errorf("Expected ErrCycleDetected, got %v", err)
 	}
 }
+
+func TestTopologicalSortJobs_IsNotAvailable(t *testing.T) {
+	// Cycle exists even if one job has high priority
+	job1 := &Job[string]{id: "1", priority: PriorityHigh, dependencies: []JobID{"3"}}
+	job2 := &Job[string]{id: "2"}
+
+	jobs := []GenericJob{
+		job1,
+		job2,
+	}
+
+	_, err := topologicalSortJobs(jobs)
+	if err != ErrMissingDependency {
+		t.Errorf("Expected ErrMissingDependency, got %v", err)
+	}
+}
diff --git a/worker.go b/worker.go
new file mode 100644
index 0000000..9f38e8a
--- /dev/null
+++ b/worker.go
@@ -0,0 +1,179 @@
+package jobqueue
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// WorkerStatus describes the lifecycle state of a worker.
+type WorkerStatus int
+
+const (
+	// WorkerStatusStopped is the zero value: the worker is not running.
+	// The constants are explicitly typed as WorkerStatus; the previous
+	// declaration left them as untyped int.
+	WorkerStatusStopped WorkerStatus = iota
+	// WorkerStatusRunning means Start completed successfully.
+	WorkerStatusRunning
+)
+
+// WorkerID uniquely identifies a worker.
+type WorkerID string
+
+// Worker is a worker that can be started and stopped and have jobs
+// assigned to it for execution.
+type Worker interface {
+	Start() error
+	Stop() error
+	Status() WorkerStatus
+	AssignJob(job GenericJob) error
+
+	GetID() WorkerID
+}
+
+// GenericWorker is a generic worker
+type GenericWorker struct {
+	ID     WorkerID
+	status WorkerStatus
+}
+
+// LocalWorker is a worker that runs jobs locally
+type LocalWorker struct {
+	GenericWorker
+	jobChannels []chan GenericJob // one unbuffered channel per job slot
+	stopChans   []chan bool       // per-slot shutdown signal
+	cancelChans []chan bool       // per-slot cancel signal (only drained after a job finishes)
+	maxJobs     int               // number of concurrent job slots
+	mu          sync.Mutex        // guards status and the channel slices
+	wg          sync.WaitGroup    // start-up handshake with the run goroutines
+}
+
+// GetID returns the ID of the worker
+func (w *GenericWorker) GetID() WorkerID {
+	return w.ID
+}
+
+// NewLocalWorker creates a new local worker with maxJobs concurrent job
+// slots. The worker starts in the stopped state; call Start before
+// AssignJob. NOTE(review): maxJobs is not validated — a negative value
+// would panic in make; confirm callers always pass a positive count.
+func NewLocalWorker(maxJobs int) *LocalWorker {
+	w := &LocalWorker{maxJobs: maxJobs}
+	w.jobChannels = make([]chan GenericJob, maxJobs)
+	w.stopChans = make([]chan bool, maxJobs)
+	w.cancelChans = make([]chan bool, maxJobs)
+	return w
+}
+
+// Start launches maxJobs worker goroutines and blocks until every one of
+// them has signalled readiness, then marks the worker as running.
+// Returns ErrWorkerAlreadyRunning if the worker is already started.
+func (w *LocalWorker) Start() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.status == WorkerStatusRunning {
+		return ErrWorkerAlreadyRunning
+	}
+
+	for i := 0; i < w.maxJobs; i++ {
+		w.wg.Add(1)
+		w.jobChannels[i] = make(chan GenericJob)
+		w.stopChans[i] = make(chan bool)
+		w.cancelChans[i] = make(chan bool)
+		go w.run(w.jobChannels[i], w.stopChans[i], w.cancelChans[i])
+	}
+
+	// Each run goroutine calls wg.Done as its first action; waiting here
+	// ensures the status only flips to running once all goroutines exist.
+	w.wg.Wait()
+	w.status = WorkerStatusRunning
+
+	return nil
+}
+
+// Stop marks the worker stopped and signals every worker goroutine to
+// exit. Returns ErrWorkerNotRunning when the worker is not started.
+//
+// NOTE(review): the stop send is unbuffered, so Stop blocks until each
+// goroutine finishes its current job and returns to its select — confirm
+// this synchronous drain is the intended behavior.
+func (w *LocalWorker) Stop() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.status == WorkerStatusStopped {
+		return ErrWorkerNotRunning
+	}
+
+	w.status = WorkerStatusStopped
+	for _, stopChan := range w.stopChans {
+		stopChan <- true
+	}
+
+	return nil
+}
+
+// run is the loop for one job slot: it executes jobs arriving on
+// jobChannel with per-job timeout and retry handling, and exits when
+// stopChan receives.
+func (w *LocalWorker) run(jobChannel chan GenericJob, stopChan chan bool, cancelChan chan bool) {
+	// Signal Start that this goroutine is up.
+	w.wg.Done()
+
+	for {
+		select {
+		case job := <-jobChannel:
+			ctx, cancel := context.WithCancel(context.Background())
+			retries := job.GetMaxRetries()
+			retryDelay := job.GetRetryDelay()
+
+			// Treat "no retries configured" as a single attempt.
+			if retries == 0 {
+				retries = 1
+			}
+
+			var err error
+			for retries > 0 {
+
+				// Default to a one-minute timeout when the job does
+				// not specify one.
+				timeout := job.GetTimeout()
+				if timeout == 0 {
+					timeout = 1 * time.Minute
+				}
+
+				ctxTimeout, cancelTimeout := context.WithTimeout(ctx, timeout)
+				_, err = job.Execute(ctxTimeout)
+				cancelTimeout()
+
+				// Stop retrying on success or outer-context cancellation.
+				if err == nil || ctx.Err() == context.Canceled {
+					break
+				}
+
+				if retryDelay > 0 {
+					time.Sleep(retryDelay)
+				}
+
+				retries--
+			}
+
+			// NOTE(review): all three branches below only release the
+			// context, and cancelChan is drained here — after the job
+			// has finished — so it cannot interrupt a running job.
+			// Confirm whether mid-job cancellation was intended.
+			select {
+			case <-cancelChan:
+				cancel()
+
+			case <-ctx.Done():
+				cancel()
+
+			default:
+				cancel()
+			}
+		case <-stopChan:
+			return
+		}
+	}
+}
+
+// AssignJob hands the job to the first idle slot using a non-blocking
+// send. Returns ErrWorkerNotRunning if the worker is not started and
+// ErrMaxJobsReached when every slot is busy.
+func (w *LocalWorker) AssignJob(job GenericJob) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.status != WorkerStatusRunning {
+		return ErrWorkerNotRunning
+	}
+
+	for _, ch := range w.jobChannels {
+
+		// A send only succeeds if the slot's goroutine is currently
+		// waiting in its receive; busy slots fall through to default.
+		select {
+		case ch <- job:
+			return nil
+		default:
+			continue
+		}
+	}
+
+	return ErrMaxJobsReached
+}
+
+// Status returns the current lifecycle state of the worker
+// (WorkerStatusStopped or WorkerStatusRunning), read under the mutex.
+func (w *LocalWorker) Status() WorkerStatus {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.status
+}
diff --git a/worker_test.go b/worker_test.go
new file mode 100644
index 0000000..8ad221a
--- /dev/null
+++ b/worker_test.go
@@ -0,0 +1,204 @@
+package jobqueue
+
+import (
+	"context"
+	assert "github.com/stretchr/testify/require"
+	"testing"
+	"time"
+)
+
+// DummyJob is a minimal GenericJob implementation used by the worker tests.
+// Execute sleeps briefly so a worker goroutine stays busy long enough for
+// the tests to exercise the maxJobs limit.
+type DummyJob struct {
+	id JobID
+}
+
+// GetID returns the job's identifier.
+func (j DummyJob) GetID() JobID {
+	return j.id
+}
+
+// GetMaxRetries reports no retry budget (the worker executes the job once).
+func (j DummyJob) GetMaxRetries() uint {
+	return 0
+}
+
+// GetRetryDelay reports no delay between retries.
+func (j DummyJob) GetRetryDelay() time.Duration {
+	return 0
+}
+
+// GetTimeout reports no explicit timeout (the worker applies its default).
+func (j DummyJob) GetTimeout() time.Duration {
+	return 0
+}
+
+// Execute simulates work by sleeping for 100ms and always succeeds.
+func (j DummyJob) Execute(_ context.Context) (RunGenericResult, error) {
+	time.Sleep(100 * time.Millisecond)
+	return nil, nil
+}
+
+// Cancel is a no-op for the dummy job.
+func (j DummyJob) Cancel() error {
+	return nil
+}
+
+// GetDependencies reports that the job depends on no other jobs.
+func (j DummyJob) GetDependencies() []JobID {
+	return []JobID{}
+}
+
+// GetPriority returns the default priority.
+func (j DummyJob) GetPriority() Priority {
+	return PriorityDefault
+}
+
+// TestAssignJob verifies that a started worker accepts a job and rejects a
+// second one while its single goroutine is busy, and that Stop succeeds.
+func TestAssignJob(t *testing.T) {
+	worker := NewLocalWorker(1)
+	err := worker.Start()
+	assert.NoError(t, err, "Start() returned error")
+
+	job := DummyJob{id: JobID("1")}
+
+	// Test assigning a job
+	err = worker.AssignJob(job)
+	assert.NoError(t, err, "AssignJob() returned error")
+
+	// Test maxJobs limit.
+	// NOTE(review): this relies on the single worker goroutine still being
+	// busy with the first job (Execute sleeps 100ms) so the second send
+	// cannot be accepted; if the job channel is buffered this check becomes
+	// timing-dependent — confirm against NewLocalWorker.
+	job2 := DummyJob{id: JobID("2")}
+	err = worker.AssignJob(job2)
+	assert.ErrorIs(t, err, ErrMaxJobsReached, "AssignJob() should return ErrMaxJobsReached")
+
+	err = worker.Stop()
+	assert.NoError(t, err, "Stop() returned error")
+}
+
+// TestWorkerLifeCycle walks a worker through stopped → running → stopped and
+// checks Status() at each step. Previously the Start/Stop errors were
+// discarded with `_ =`, which would make the status assertions misleading
+// when either call failed; they are now checked.
+func TestWorkerLifeCycle(t *testing.T) {
+	worker := NewLocalWorker(1)
+
+	// Test initial status
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Initial worker status should be WorkerStatusStopped")
+	}
+
+	// Test start
+	err := worker.Start()
+	assert.NoError(t, err, "Start() returned error")
+	if worker.Status() != WorkerStatusRunning {
+		t.Errorf("Worker status should be WorkerStatusRunning after Start()")
+	}
+
+	// Test job assignment
+	job := DummyJob{id: JobID("1")}
+	err = worker.AssignJob(job)
+	if err != nil {
+		t.Errorf("AssignJob() returned error: %v", err)
+	}
+
+	// Test job cancellation
+	//worker.CancelJob(JobID("1"))
+
+	// Test stop
+	err = worker.Stop()
+	assert.NoError(t, err, "Stop() returned error")
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Worker status should be WorkerStatusStopped after Stop()")
+	}
+}
+
+// TestWorkerLifeCycle2 exercises a two-goroutine worker: start, fill both
+// slots, hit the maxJobs limit, stop, reject assignment while stopped, and
+// restart. Sentinel errors are now checked with assert.ErrorIs instead of
+// direct != comparisons, consistent with the assert.NoError calls already
+// used in this test (and robust to future error wrapping).
+func TestWorkerLifeCycle2(t *testing.T) {
+	worker := NewLocalWorker(2)
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Newly created worker should be in Stopped state")
+	}
+
+	// Start the worker
+	err := worker.Start()
+	assert.NoError(t, err)
+
+	if worker.Status() != WorkerStatusRunning {
+		t.Errorf("Worker should be in Running state after Start()")
+	}
+
+	// Assign jobs
+	job1 := DummyJob{id: "job1"}
+	err = worker.AssignJob(job1)
+	if err != nil {
+		t.Errorf("Failed to assign job1: %v", err)
+	}
+
+	job2 := DummyJob{id: "job2"}
+	err = worker.AssignJob(job2)
+	if err != nil {
+		t.Errorf("Failed to assign job2: %v", err)
+	}
+
+	// Check maxJobs limit.
+	// NOTE(review): relies on both goroutines still being busy with their
+	// 100ms jobs when the third assignment is attempted — timing-dependent
+	// if the job channels are buffered.
+	job3 := DummyJob{id: "job3"}
+	err = worker.AssignJob(job3)
+	assert.ErrorIs(t, err, ErrMaxJobsReached)
+
+	// Stop the worker
+	err = worker.Stop()
+	assert.NoError(t, err)
+
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Worker should be in Stopped state after Stop()")
+	}
+
+	// Make sure we can't assign jobs when worker is stopped
+	err = worker.AssignJob(job1)
+	assert.ErrorIs(t, err, ErrWorkerNotRunning)
+
+	// Check if jobs are cancellable
+	err = worker.Start()
+	assert.NoError(t, err)
+
+	err = worker.AssignJob(job1)
+	if err != nil {
+		t.Errorf("Failed to assign job1: %v", err)
+	}
+	//worker.CancelJob("job1")
+
+	// Check if Stop() actually stops the jobs
+	err = worker.AssignJob(DummyJob{id: "longJob"})
+	assert.NoError(t, err)
+
+	err = worker.Stop()
+	assert.NoError(t, err)
+
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Worker should be in Stopped state after Stop()")
+	}
+
+	// Wait for some time to make sure jobs are actually stopped
+	time.Sleep(1 * time.Second)
+	if worker.Status() != WorkerStatusStopped {
+		t.Errorf("Worker should remain in Stopped state")
+	}
+}
+
+// TestCancelJob starts a worker, assigns a job, and stops the worker.
+// NOTE(review): the actual cancellation call is commented out below, so this
+// test currently only covers start/assign/stop — confirm whether CancelJob
+// is still planned.
+func TestCancelJob(t *testing.T) {
+	worker := NewLocalWorker(1)
+	err := worker.Start()
+	if err != nil {
+		t.Errorf("Start() returned error: %v", err)
+	}
+
+	job := DummyJob{id: JobID("1")}
+
+	// Assign a job
+	err = worker.AssignJob(job)
+	if err != nil {
+		t.Errorf("AssignJob() returned error: %v", err)
+	}
+
+	// Test job cancellation
+	//worker.CancelJob(JobID("1"))
+
+	err = worker.Stop()
+	if err != nil {
+		t.Errorf("Stop() returned error: %v", err)
+	}
+}
-- 
GitLab