Compare commits


16 Commits
main ... dev

Author SHA1 Message Date
e1ab3f28d6 fix: log timestamps are now updated 2024-09-21 10:01:34 +02:00
aae388897e feat: add messagequeue api 2024-09-21 10:01:34 +02:00
99174939f5 feat: handle sigint properly 2024-09-09 07:21:09 +02:00
bdf3a54ba0 fix: add missing copyright notices 2024-09-08 21:13:41 +02:00
5ba8a9f063 fix: solve leak sanitizer on graceful shutdown
I am testing by simply calling `pipeline_cancel` on the first pipeline
execution.
2024-09-08 21:09:48 +02:00
7e80274784 feat: add hot-reloading support
Just launch and edit the pipeline.conf file.
2024-09-02 20:42:22 +02:00
4ba2362f5c fix: slightly better trigger dir management 2024-08-31 08:27:33 +02:00
fa6ec315c8 feat: unique directory for pipelines
Also, start using fork/chdir/exec idiom instead of posix_spawn, because
as we all know: posix_spawn is stupid as a system call:

https://lwn.net/Articles/360556/
2024-08-31 08:05:44 +02:00
22075e17e1 fix: remove PATH default env
spawnp searches through PATH for you
2024-08-29 18:21:43 +02:00
fd0eb59aae fix: some cleanup 2024-08-29 18:18:29 +02:00
484efc200a feat: add environment variable passing 2024-08-26 20:48:22 +02:00
cfffa43428 feat: introduce sanitizers
had to fix some things
2024-08-26 19:50:48 +02:00
0f1aa982f4 fixup! refactor: move executor function into it's own file 2024-08-26 18:40:23 +02:00
05701d9d85 wip: custom environment variable passing
You should be able to tell your sci deployment which env vars should be
passed to the pipelines with -e ENV1 -e ENV2 and so on
2024-08-25 15:52:43 +02:00
e442800779 fix: move the new utils into utils.h/c 2024-08-25 15:05:22 +02:00
faf362c607 feat: add better shell-like command support
You can now execute any kind of program in the PATH.
You do, however, need to specify "./" if you want to execute a local
file, but that shouldn't be too big a problem.
2024-08-25 09:54:03 +02:00
28 changed files with 946 additions and 224 deletions

.dockerfile Deleted file

@@ -1,6 +0,0 @@
# TODO: use alpine when available
FROM debian:12-slim
ADD artifacts.tar.gz /install
RUN dpkg -i /install/artifacts/sci_*-1_amd64.deb
RUN rm -rf /install
ENTRYPOINT ["sci"]

.sci.sh

@@ -2,6 +2,7 @@
 set -e
 echo ">>> checking if required environment is set..."
 test -n "$DOCKER_TOKEN"
+which make
 echo ">>> compiling..."
 make
@@ -12,7 +13,7 @@ make dist
 SRC_SHA256=$(sha256sum "sci-${VERSION}.tar.gz" | awk '{ print $1 }')
 sed "s/SRC_SHA256/${SRC_SHA256}/g" < PKGBUILD.in > PKGBUILD
-# arch #
+# arch
 echo ">>> building archbuilder image..."
 docker build -t archbuilder -f arch-builder.dockerfile .
@@ -29,7 +30,7 @@ echo ">>> building debbuilder image..."
 docker build -t debbuilder -f deb-builder.dockerfile .
 echo ">>> building .deb in debbuilder docker image..."
-docker run --rm -it -v .:/src -e VERSION debbuilder sh -c '\
+docker run --rm -it -v .:/src -e VERSION -e DOCKER_TOKEN debbuilder sh -c '\
@@ -46,14 +47,8 @@ docker run --rm -it -v .:/src -e VERSION debbuilder sh -c '\
 cp ../*.tar.xz ~/artifacts && \
 cp ../*.tar.gz ~/artifacts && \
 cd && \
-tar czf /src/artifacts.tar.gz artifacts
+curl --user agj:$DOCKER_TOKEN \
+--upload-file sci_$VERSION-1_amd64.deb \
+"https://git.gtz.dk/api/packages/agj/debian/pool/bionic/main/upload"
 '
-echo ">>> building sci docker image..."
-export OWNER="git.gtz.dk/agj"
-docker build -t ${OWNER}/sci:${VERSION} -t ${OWNER}/sci:latest -f .dockerfile .
-echo ">>> pushing latest docker image..."
-# TODO: user should be some sci-bot or something, not your account. This will do for now though
-docker login git.gtz.dk -u agj -p "$DOCKER_TOKEN"
-docker push ${OWNER}/sci:latest
+# TODO: push-user should be some sci-bot or something, not your account. This will do for now though

Makefile

@@ -18,9 +18,12 @@ CFLAGS += -DSCI_NAME="\"$(NAME)\""
 CFLAGS += -DSCI_DESCRIPTION="\"$(DESCRIPTION)\""
 CFLAGS += -D_POSIX_C_SOURCE=2
 CFLAGS += -D_GNU_SOURCE
-CFLAGS += -Wall -Werror -std=c11 -g
+CFLAGS += -Wall -Werror -std=c11
 CFLAGS += -Iinclude
-CFLAGS += -lpthread -luuid
+CFLAGS += -lpthread -luuid -lrt
+CFLAGS += -fsanitize=address
+CFLAGS += -fsanitize=undefined
+CFLAGS += -g
 .PHONY: all clean dist install
@@ -29,14 +32,17 @@ all: out/bin/sci
 out/obj/%.o: src/%.c | $(OBJDIR)
 $(CC) -c $? $(CFLAGS) -o $@
-OBJ += out/obj/main.o
+OBJ += out/obj/api.o
 OBJ += out/obj/cli.o
+OBJ += out/obj/executor.o
 OBJ += out/obj/log.o
+OBJ += out/obj/main.o
 OBJ += out/obj/notify.o
-OBJ += out/obj/util.o
 OBJ += out/obj/pipeline.o
+OBJ += out/obj/strlist.o
 OBJ += out/obj/threadlist.o
 OBJ += out/obj/threadpool.o
+OBJ += out/obj/util.o
 out/bin/sci: $(OBJ) | $(BINDIR)
 $(CC) -o $@ $^ $(CFLAGS)

TODO.md

@@ -8,7 +8,7 @@
 - [x] Fourth things fourth, implement a prototype that reads a space-separated file and populates a struct.
 - [x] Fifth things fifth, implement a prototype that spawns a new thread that executes a shell command.
 - [x] Sixth things sixth, daemonize it!
-- [ ] Seventh things seventh, package the sucker (arch, debian, alpine, docker)
+- [x] Seventh things seventh, package the sucker (arch, debian, alpine, docker)
 - [x] archlinux
 - https://wiki.archlinux.org/title/Creating_packages
 - [x] debian
@@ -16,16 +16,22 @@
 - just use docker.
 - [-] ~~alpine~~ later.
 - [-] ~~docker~~ later.
-- [ ] Eight things eight, try it out! - maybe even write the python webhook extension.
-- [ ] Ninth things ninth, fix bugs, see below
+- [x] Eight things eight, try it out! - maybe even write the python webhook extension.
+- [x] Port this document to gitea issue tracking
+- [x] enable PATH-able programs and argv in the command section
+- [x] custom environment variable passing. Something like `-e MY_TOKEN` ala docker-style
+- [x] address sanitizers please.
+- [ ] Ninth things ninth, fix bugs, see https://git.gtz.dk/agj/sci/projects/1
+- [ ] docstring in all header files
 - [ ] Tenth things tenth, write manpages, choose license
 - [ ] Eleventh things Eleventh, polish
 - [ ] Twelveth things last, release!
-- Setup gitea.gtz.dk (will learn you how to set up subdomains (useful for shop.gtz.dk))
-BOOKMARK: okay. Now it feels like it's getting complicated. I want to run `sci` in a docker container. But that means
+- [x] Setup git.gtz.dk (will learn you how to set up subdomains (useful for shop.gtz.dk))
+- [ ] -1th things -1th, write a blog post about the tool (also set up your blog.gtz.dk)
+Okay. Now it feels like it's getting complicated. I want to run `sci` in a docker container. But that means
 that the build-threads also run in that docker container - meaning the container should have all the build dependencies
-installed and we all know where that rabbithole goes. 9-30YB docker images with about a trillion unique build systems.
+installed and we all know where that rabbithole goes. 9-30YiB docker images with about a trillion unique build systems.
 Let's not do that.
 The only alternative I can see is that the `sci` service is just not dockerized. The pipeline scripts can easily be
 dockerized themselves. Just have a `scripts/wget-src-dist-and-sci-sh-dockerized.sh` with `arg1` being the docker image
@@ -33,12 +39,24 @@ to use?
 ```sh
 #!/bin/sh
 wget "$SCI_PIPELINE_URL"
-docker run --rm -it --mount type=bind,source="$(pwd)"/thefileyouwgot.tar.gz,target=/thefileyouwgot.tar.gz,readonly --entrypoint sh $2
+docker run --rm -it -v .:/src -w /src $@
 ```
 Or something like that... Perhaps we can figure something out with an inline `ADD`, that also extracts the archive in
 the container or something. This approach is cleaner IMO. You can also more easily edit the `pipelines.conf` file if you
 need to.
+The aforementioned rabbithole went like this:
+- Let's say that `sci` is run inside a docker container.
+This would make it very easy to deploy, but:
+- Since pipelines are executed in the same environment as `sci`, either:
+The `sci` container must be all-encompassing. i.e. it contains every single build system and scriptling language that
+could possibly be used by any kind of user or; all pipelines must be run from a docker container themselves, meaning
+that the `sci` container must have `dind`-privileges. Either option is suboptimal and will lock users into one way of
+using `sci`, which is bad.
+- Conclusion: Fuck docker. All environment management is delegated to the user and is not `sci`'s responsibility!
+`sci` will always be run on the ci-machine itself, unless a user has provided a custom docker image, which is fine
+and doesn't burden the `sci` project.
 You were getting the following `pipelines.conf` file to work:
 ```
 scih-dev ssh://git@git.gtz.dk:222/agj/scih.git scih-onpush /etc/sci/scripts/git-clone-and-run-sci-sh.sh
@@ -54,25 +72,6 @@ alpine linux is using OpenRC (cool), which complicates things a little bit, but the
 generally really well written. Otherwise, I am sure that both wiki.gentoo and wiki.archlinux have great pages too
 docker is super easy, just make a dockerfile - only concern is the trigger files.
-#### Bugs / Missing Features
-- [x] command output is being inherited. It should be piped into some random log-file
-- [ ] pretty sure that `ctrl+c` / SIGINT is not graceful yet.
-- [ ] missing license (heavily considering GPLv3)
-- [ ] pipeline scripts should be executed in a unique `/tmp` dir
-- [ ] Some way for third parties to see which pipelines are currently running and their status.
-- Could be as simple as looking in the logs directory.
-- How to mark a run as failed / success / warn?
-- Third parties may need to extract artifacts.
-or maybe the scripts themselves would upload artifacts?
-- [ ] I am deliberately not using `Restart=on-failure` in the `scid.service` file because we are using `Type=exec`
-and not `Type=notify` (yet) - which would require a `sd_notify` call of `READY=1` (see `man systemd.service`)
-- [ ] Custom environment variables passed to the pipelines on invokation should be possible.
-- [ ] Listener threads should be killed and restarted (worker pool should just chug along) when pipeline config file
-has changed during runtime. Should be disableable with `--no-hot-reload-config` - i.e. on by default.
-- [ ] `docker stop` is very slow. I am probably not handling signals properly yet.
-- [x] It seems that `-v 4` is segfaulting when running release builds, maybe the logger just cant find the source file?
-Nope. I just wrote some bad code (inverted NULL check).
 ### Note Regarding `inotify` usage
 From the manpage:
 ```

include/api.h Normal file

@@ -0,0 +1,27 @@
#ifndef SCI_API_H
#define SCI_API_H
// Start the api. This will also trigger an "sci started" event.
// Note that this is a blocking call.
void* api_start(void*);
// Fork the process and have the child run the api.
void api_start_p();
// Destroy all listeners and release the message queue.
void api_destroy();
// Post a newline-separated string with pipeline_id entries
// of the currently running pipelines on the message queue.
void api_list_running_pipelines();
// Trigger a pipeline started event.
void api_pipeline_started(const char* pipeline_id, const char* name);
// Trigger a pipeline ended event.
void api_pipeline_ended(const char* pipeline_id, const char* name, int exit_code);
// Trigger an api started event.
void api_started();
#endif // !SCI_API_H
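The header above is the daemon-side surface of the new message-queue API. For orientation, here is a minimal sketch of what an out-of-tree client could look like; it is not part of this changeset, and it assumes the queue names (`/sci_rx`, `/sci_tx`), the `list` request string and the 512-byte message size used in `src/api.c` further down.

```c
// Hypothetical client, illustrative only. Queue names and sizes are taken
// from src/api.c in this compare; build with -lrt.
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    mqd_t req = mq_open("/sci_rx", O_WRONLY); // sci reads requests from here
    mqd_t ev  = mq_open("/sci_tx", O_RDONLY); // sci posts events here
    if(req == (mqd_t)-1 || ev == (mqd_t)-1) {
        perror("mq_open");
        return 1;
    }
    if(mq_send(req, "list", strlen("list"), 1) == -1) // ask for running pipelines
        perror("mq_send");
    char buf[8192]; // must be at least the queue's mq_msgsize (512 in api.c)
    ssize_t n = mq_receive(ev, buf, sizeof buf, NULL);
    if(n == -1)
        perror("mq_receive");
    else
        printf("event: %.*s\n", (int)n, buf);
    mq_close(req);
    mq_close(ev);
    return 0;
}
```

Note that `api_list_running_pipelines()` is still a stub in `src/api.c`, so for now the only traffic to expect on `/sci_tx` is the `sci_started`, `pipeline_new` and `pipeline_end` events.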

include/cli.h

@@ -29,6 +29,8 @@ typedef struct {
 bool use_colors;
 optional_str log_file;
 optional_str pipeline_log_dir;
+optional_str pipeline_cwd;
+optional_strlist environment_vars;
 } cli_options;
 // Construct a new cli_options struct instance.

include/executor.h Normal file

@@ -0,0 +1,27 @@
/**
* sci - a simple ci system
Copyright (C) 2024 Asger Gitz-Johansen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef SCI_EXECUTOR_H
#define SCI_EXECUTOR_H
#include "strlist.h"
void executor(void* pipeline_event);
void set_logdir(const char* logdir);
void set_working_directory(const char* cwd);
void set_shared_environment(const strlist_node* root);
#endif

include/notify.h

@@ -21,9 +21,12 @@
 #include <sys/inotify.h>
 typedef void(*notify_callback)(pipeline_event* const);
+typedef void(*config_change_callback)();
 // Start listening for changes to the provided file.
 // Note that the `struct inotify_event*` provided is a managed pointer.
 void listen_for_changes(const pipeline_conf* config, notify_callback callback);
+void listen_for_config_changes(const char* config_filepath, config_change_callback callback);
 #endif
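To make the new hook concrete, here is a small sketch of how a config-change callback could be wired up; it is illustrative only. The path `/etc/sci/pipelines.conf` is an assumption for the example (`src/main.c` below passes whatever `-f` pointed at), and the call blocks on inotify, which is why the daemon runs it in a loop on a dedicated thread.

```c
// Illustrative only: wire a callback to the new config-change listener.
#include <stdio.h>
#include "notify.h"

static void on_conf_changed() {
    printf("pipelines.conf changed - time to reload\n");
}

int main(void) {
    // Example path; sci uses the file given via -f. Each call handles one
    // batch of inotify events and returns, hence the loop in main.c.
    listen_for_config_changes("/etc/sci/pipelines.conf", &on_conf_changed);
    return 0;
}
```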

include/optional.h

@@ -17,6 +17,7 @@
 */
 #ifndef SCI_OPTIONAL_H
 #define SCI_OPTIONAL_H
+#include "strlist.h"
 #include <stdbool.h>
 #define optional_type(type) struct { bool has_value; type value; }
@@ -24,5 +25,6 @@ typedef optional_type(int) optional_int;
 typedef optional_type(float) optional_float;
 typedef optional_type(char*) optional_str;
 typedef optional_type(const char*) optional_cstr;
+typedef optional_type(strlist_node*) optional_strlist;
 #endif

include/pipeline.h

@@ -36,9 +36,13 @@ typedef struct {
 char* command;
 } pipeline_event;
-// create a new pipeline_conf struct instance based on a configuration line.
+// create a new `pipeline_conf` struct instance based on a configuration line.
 optional_pipeline_conf pipeline_create(const char* config_line);
+void pipeline_event_destroy(pipeline_event* ev);
+void pipeline_destroy(pipeline_conf* conf);
 void pipeline_register(pthread_t thread);
 void pipeline_loop();
+void pipeline_cancel();
+int pipeline_count();
 #endif
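A short sketch of the new lifecycle functions in use: parse one `pipelines.conf` line and release it with the newly added `pipeline_destroy`. The example line is the `scih-dev` entry quoted in TODO.md; the program itself is illustrative, not part of the diff.

```c
// Illustrative only: parse and release a single configuration line.
#include <stdio.h>
#include "pipeline.h"

int main(void) {
    optional_pipeline_conf conf = pipeline_create(
        "scih-dev ssh://git@git.gtz.dk:222/agj/scih.git scih-onpush /etc/sci/scripts/git-clone-and-run-sci-sh.sh");
    if(!conf.has_value) {
        fprintf(stderr, "invalid configuration line\n");
        return 1;
    }
    printf("name=%s trigger=%s command=%s\n",
           conf.value->name, conf.value->trigger, conf.value->command);
    pipeline_destroy(conf.value); // frees all four strings and the struct itself
    return 0;
}
```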

include/strlist.h Normal file

@@ -0,0 +1,58 @@
/**
* sci - a simple ci system
Copyright (C) 2024 Asger Gitz-Johansen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef SCI_STRLIST_H
#define SCI_STRLIST_H
// doubly linked list
typedef struct strlist_node {
char* str;
struct strlist_node* previous;
struct strlist_node* next;
} strlist_node;
// Create a new root node.
// This function is not threadsafe.
strlist_node* create_strlist_node(char* str);
// Add a new string to the string list.
// This function is not threadsafe.
strlist_node* add_str(char* str, strlist_node* root);
// add a new string list node to the list.
// This function is not threadsafe.
strlist_node* add_str_node(strlist_node* str_node, strlist_node* root);
// Remove a string list node from the list.
// This will free the str and stitch the "previous" and "next" ptrs.
// This function is not threadsafe.
void remove_strlist_node(strlist_node* node);
// Completely clear the list.
// The list is completely invalid after this call and should be discarded.
// root itself will not be free'd by this function, but all content will be.
// This function is not threadsafe.
void clear_strlist(strlist_node* root);
// Convert a strlist to an array of strings.
// Note that this copies the strlist, you still have to free it.
// The array itself is NULL terminated, enabling you to iterate to the end.
// The array should be free'd, as well as each of the entries.
// Returns NULL if the provided strlist is empty.
char** strlist_to_array(strlist_node* root);
#endif
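A small usage sketch for this list type, mirroring how `src/executor.c` builds an environment block before `execvpe`; the two strings are arbitrary example data.

```c
// Illustrative only: build a list, flatten it to a NULL-terminated array, clean up.
#include <stdio.h>
#include <stdlib.h>
#include "strlist.h"

int main(void) {
    strlist_node* env = create_strlist_node("SCI_PIPELINE_NAME=example");
    add_str("SCI_PIPELINE_TRIGGER=example-onpush", env);
    char** arr = strlist_to_array(env); // copies the strings; NULL-terminated
    for(char** p = arr; p != NULL && *p != NULL; p++)
        printf("%s\n", *p);
    if(arr != NULL) {
        for(char** p = arr; *p != NULL; p++) // entries are strdup'd copies
            free(*p);
        free(arr);
    }
    clear_strlist(env); // frees every node and its contents
    return 0;
}
```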

include/threadlist.h

@@ -42,9 +42,8 @@ void remove_thread_node(pthread_list_node* node);
 // Completely clear the thread list.
 // This will call pthread_join on all nodes.
 // The list is completely invalid after this call and should be discarded.
-// Note:
-// - `root` has already been free'd.
-// - this function is not thread-safe.
+// Even root itself will be free'd by this function so it should be discarded as well.
+// This function is not thread-safe.
 void clear_thread_list(pthread_list_node* root);
 #endif

include/util.h

@@ -40,5 +40,16 @@ typedef void(*line_handler)(const char*);
 void per_line(const char* file, line_handler handler);
 char* join(const char* a, const char* b);
+char* join3(const char* a, const char* b, const char* c);
+char* join4(const char* a, const char* b, const char* c, const char* d);
+char* join5(const char* a, const char* b, const char* c, const char* d, const char* e);
+char* join6(const char* a, const char* b, const char* c, const char* d, const char* e, const char* f);
+const char* skip_arg(const char* cp);
+char* skip_spaces(const char* str);
+int count_argc(const char* str);
+char** argv_split(const char* str, int* argc_out);
+void argv_free(char** argv);
+int which(const char* program_name, char* out_full_program, int max_path);
 #endif
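The new `argv_*` and `which` helpers are what allow a pipeline command to be any `$PATH` program rather than a `/bin/sh -c` string. A minimal sketch of how they combine; the `echo hello world` command line is just example input.

```c
// Illustrative only: split a command line and resolve argv[0] through $PATH.
#include <linux/limits.h>
#include <stdio.h>
#include "util.h"

int main(void) {
    char** argv = argv_split("echo hello world", NULL); // NULL-terminated copy
    if(argv == NULL)
        return 1;
    char resolved[PATH_MAX];
    if(which(argv[0], resolved, PATH_MAX) == 0) // same lookup executor.c does
        printf("%s resolves to %s\n", argv[0], resolved);
    for(char** p = argv; *p != NULL; p++)
        printf("arg: %s\n", *p);
    argv_free(argv);
    return 0;
}
```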

scripts/README.md Normal file

@@ -0,0 +1,3 @@
# sci default scripts
This directory contains some default scripts that may or may not be useful to you.
Most of the scripts are fairly simple, but should be installed as part of the sci installation process.

scripts/git-clone-and-run-sci-sh.sh Deleted file

@@ -1,8 +0,0 @@
#!/bin/sh
set -e
echo ">>> cloning..."
git clone $SCI_PIPELINE_URL $SCI_PIPELINE_NAME
cd $SCI_PIPELINE_NAME
echo ">>> running .sci.sh..."
time sh .sci.sh

scripts/git-clone-and-sci.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/sh
set -ex # print all that we're doing (no need for echo's)
tmpdir=$(mktemp -d)
git clone --depth=1 --recurse-submodules --shallow-submodules -b $1 "$SCI_PIPELINE_URL" "$tmpdir"
shift
cd "$tmpdir"
sh .sci.sh
cd -

scripts/wget-and-sci.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/sh
# NOTE: This script assumes that the url is a .tar.gz file.
# TODO: check if $# is >= 1 and give a warning that the extract dir should be provided.
set -ex # print all that we're doing (no need for echo's)
env
tmpdir=$(mktemp -d)
wget "$SCI_PIPELINE_URL" -P "$tmpdir"
cd "$tmpdir"
tar xf *.tar.gz
cd $1
sh .sci.sh
cd -
rm -rf "$tmpdir"

src/api.c Normal file

@@ -0,0 +1,103 @@
#include "api.h"
#include "log.h"
#include "util.h"
#include <linux/limits.h>
#include <mqueue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// TODO: Make sure to write a manpage for this api
#define LIST_REQ "list"
#define MQ_MAX_SIZE 8192
bool api_is_running = false;
mqd_t api_out;
mqd_t api_in;
void api_handle_request(const char* request) {
log_trace("api request: '%s'", request);
// list
if(strncmp(request, LIST_REQ, MQ_MAX_SIZE) == 0) {
api_list_running_pipelines();
return;
}
// else
log_error("unrecognized api request: '%s'", request);
}
void* api_start(void* data) {
struct mq_attr attr;
attr.mq_flags = 0;
attr.mq_maxmsg = 10;
attr.mq_msgsize = 512;
attr.mq_curmsgs = 0;
api_out = mq_open("/sci_tx", O_CREAT | O_WRONLY, 0666, &attr);
if(api_out == -1) {
perror("mq_open");
return NULL;
}
api_in = mq_open("/sci_rx", O_CREAT | O_RDONLY, 0666, &attr); // TODO: Consider some better mq names
if(api_in == -1) {
perror("mq_open");
return NULL;
}
api_started();
log_info("api listening for requests");
char msg[MQ_MAX_SIZE];
api_is_running = true;
while(api_is_running) {
memset(msg, '\0', MQ_MAX_SIZE);
if(mq_receive(api_in, msg, MQ_MAX_SIZE, NULL) == -1) {
perror("mq_receive");
return NULL;
}
api_handle_request(msg);
}
return NULL;
}
void api_start_p() {
pthread_t conf_listener;
ASSERT_SYSCALL_SUCCESS(pthread_create(&conf_listener, NULL, &api_start, (void*)NULL));
pthread_setname_np(conf_listener, "sci-api");
}
void api_destroy() {
log_trace("closing api");
api_is_running = false;
mq_unlink("/sci");
}
void api_list_running_pipelines() {
// TODO: you need a way of enumerating the pipeline ids before this can be implemented.
log_error("cannot list running pipelines yet, feature is work-in-progress.");
}
void api_pipeline_started(const char* pipeline_id, const char* name) {
char* msg = join("pipeline_new ", pipeline_id);
if(mq_send(api_out, msg, strnlen(msg, 256), 1) == -1)
perror("mq_send");
free(msg);
}
void api_pipeline_ended(const char* pipeline_id, const char* name, int exit_code) {
char exit_code_str[64];
sprintf(exit_code_str, " %d", exit_code);
char* msg = join6("pipeline_end ", pipeline_id, " ", name, " ", exit_code_str);
if(mq_send(api_out, msg, strnlen(msg, 256), 1) == -1)
perror("mq_send");
free(msg);
}
void api_started() {
if(mq_send(api_out, "sci_started", 12, 1) == -1)
perror("mq_send");
}

src/cli.c

@@ -15,11 +15,11 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
+#include "cli.h"
+#include <getopt.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <getopt.h>
-#include "cli.h"
 cli_options new_options() {
 cli_options result;
@@ -50,6 +50,23 @@ cli_options new_options() {
 char* pipeline_log_dir = getenv("SCI_PIPELINE_LOG_DIR");
 result.pipeline_log_dir.has_value = pipeline_log_dir != NULL;
 result.pipeline_log_dir.value = pipeline_log_dir;
+char* pipeline_cwd = getenv("SCI_PIPELINE_CWD");
+result.pipeline_cwd.has_value = pipeline_cwd != NULL;
+result.pipeline_cwd.value = pipeline_cwd;
+char* environment_vars = getenv("SCI_PIPELINE_ENV_VARS");
+if(environment_vars == NULL) {
+result.environment_vars.has_value = false;
+result.environment_vars.value = NULL;
+} else {
+char* tok = strtok(environment_vars, ";");
+result.environment_vars.has_value = true;
+result.environment_vars.value = create_strlist_node(tok);
+tok = strtok(NULL, ";");
+while(tok != NULL)
+add_str(tok, result.environment_vars.value);
+}
 return result;
 }
@@ -60,24 +77,28 @@ void destroy_options(cli_options v) {
 free(v.log_file.value);
 if(v.pipeline_log_dir.has_value)
 free(v.pipeline_log_dir.value);
+if(v.environment_vars.has_value)
+clear_strlist(v.environment_vars.value);
 }
 // <max
-const char* optstring = "f:L:e:v:Cl:hV";
+const char* optstring = "f:L:P:w:v:Cl:e:hV";
 const char* help_msg =
 "%s %s\n"
-"Usage: [-f file] [-L dir] [-e count] [-v level] \n"
-" [-C] [-l file] [-h] [-V]\n"
+"Usage: [-f file] [-L dir] [-P dir] [-w count]\n\n"
+" [-v level] [-C] [-l file] [-e ENV] [-h] [-V]\n"
 "\n"
 SCI_DESCRIPTION "\n"
 "\n"
 "OPTIONS:\n"
 " -f file Set sci config file\n"
 " -L dir Set pipeline log output directory\n"
-" -e count Set the amount of worker threads\n"
+" -P dir Set pipeline working directory prefix\n"
+" -w count Set the amount of worker threads\n"
 " -v level Set verbosity level [0-4]\n"
 " -C Force color output, ignoring $NO_COLOR\n"
 " -l file Set sci's log to output to a file\n"
+" -e ENV Pass an env variable to pipelines\n"
 " -h Show this message and exit\n"
 " -V Show version and exit\n"
 "\n"
@@ -103,10 +124,14 @@ cli_options parse(int argc, char** argv) {
 options.pipeline_log_dir.value = strdup(optarg);
 options.pipeline_log_dir.has_value = true;
 break;
+case 'P':
+options.pipeline_cwd.value = strdup(optarg);
+options.pipeline_cwd.has_value = true;
+break;
 case 'v':
 options.verbosity = atoi(optarg);
 break;
-case 'e':
+case 'w':
 options.executors = atoi(optarg);
 break;
 case 'C':
@@ -122,6 +147,13 @@ cli_options parse(int argc, char** argv) {
 case 'h':
 options.help = true;
 break;
+case 'e':
+if(!options.environment_vars.has_value) {
+options.environment_vars.has_value = true;
+options.environment_vars.value = create_strlist_node(optarg);
+} else
+add_str(optarg, options.environment_vars.value);
+break;
 default:
 print_help(stderr, argv[0]);
 exit(EXIT_FAILURE);

src/executor.c Normal file

@@ -0,0 +1,182 @@
/**
* sci - a simple ci system
Copyright (C) 2024 Asger Gitz-Johansen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "executor.h"
#include "api.h"
#include "log.h"
#include "optional.h"
#include "pipeline.h"
#include "strlist.h"
#include "util.h"
#include <fcntl.h>
#include <linux/limits.h>
#include <spawn.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <uuid/uuid.h>
const char* log_dir = ".";
const char* cwd = "/tmp";
const strlist_node* shared_environment = NULL;
void set_shared_environment(const strlist_node* root) {
shared_environment = root;
}
void set_logdir(const char* logdir) {
log_dir = logdir;
struct stat st = {0};
if(stat(log_dir, &st) == -1)
mkdir(log_dir, 0700);
}
void set_working_directory(const char* _cwd) {
cwd = _cwd;
struct stat st = {0};
if(stat(cwd, &st) == -1)
mkdir(cwd, 0700);
}
char* create_pipeline_id() {
uuid_t uuid;
uuid_generate(uuid);
// example uuid
// 662ddee9-ee7c-4d13-8999-a2604c6d12d6
// it's 36 characters (+null)
char* pipeline_id = malloc(sizeof(char) * 37);
uuid_unparse_lower(uuid, pipeline_id);
return pipeline_id;
}
optional_int open_logfile(const char* const pipeline_id) {
optional_int result;
result.has_value = false;
result.value = 0;
char* log_filepath = join4(log_dir, "/", pipeline_id, ".log");
int fd = open(log_filepath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd != -1) {
result.has_value = true;
result.value = fd;
} else
perror("open");
free(log_filepath);
return result;
}
void add_joined_str(strlist_node* root, const char* a, const char* b) {
char* tmp = join(a, b);
add_str(tmp, root);
free(tmp);
}
void add_env(strlist_node* root, const char* env) {
char* tmp = join3(env, "=", getenv(env));
add_str(tmp, root);
free(tmp);
}
char** create_environment(const pipeline_event* const e, const char* pipeline_id) {
char* tmp = join("SCI_PIPELINE_NAME=", e->name);
strlist_node* env = create_strlist_node(tmp);
free(tmp);
add_joined_str(env, "SCI_PIPELINE_URL=", e->url);
add_joined_str(env, "SCI_PIPELINE_TRIGGER=", e->trigger);
add_joined_str(env, "SCI_PIPELINE_ID=", pipeline_id);
if(shared_environment != NULL) {
const strlist_node* cursor = shared_environment;
while(cursor != NULL) {
add_env(env, cursor->str);
cursor = cursor->next;
}
}
char** envp = strlist_to_array(env);
clear_strlist(env);
return envp;
}
void executor(void* data) {
// Create pipeline id
char* pipeline_id = create_pipeline_id();
// Create logfile path
optional_int fd = open_logfile(pipeline_id);
if(!fd.has_value) {
log_error("could not open log file - not starting pipeline");
return;
}
// spawn the process
pid_t pid;
const pipeline_event* const e = data;
char** envp = create_environment(e, pipeline_id);
int argc;
char** argv = argv_split(e->command, &argc);
log_trace("executing pipeline %s with argv:", e->name);
for(int i = 0; i < argc; i++)
log_trace(" \"%s\"", argv[i]);
char arg0[PATH_MAX];
if(which(argv[0], arg0, PATH_MAX) == -1)
goto end;
// fork / cwd / exec idiom
pid = fork();
if(pid < 0) {
perror("fork");
goto end; // I know. The raptors have picked up the scent. I'll just have to mask it with more stinky code.
}
if(pid == 0) {
// child process
dup2(fd.value, STDOUT_FILENO);
dup2(fd.value, STDERR_FILENO);
char* pipeline_cwd = join3(cwd, "/", pipeline_id);
struct stat st = {0};
if(stat(pipeline_cwd, &st) == -1)
mkdir(pipeline_cwd, 0700);
chdir(pipeline_cwd);
free(pipeline_cwd);
execvpe(arg0, argv, envp);
return;
}
api_pipeline_started(pipeline_id, e->name);
log_info("{%s} (%s) spawned", pipeline_id, e->name);
// Wait for process to complete
int status;
waitpid(pid, &status, 0);
api_pipeline_ended(pipeline_id, e->name, status);
log_info("{%s} (%s) [pid=%d] exited with status %d", pipeline_id, e->name, pid, status);
char buf[32];
sprintf(buf, "exited with status %d", status);
if(write(fd.value, buf, strnlen(buf, 32)) == -1)
perror("write");
end:
argv_free(argv);
close(fd.value);
free(pipeline_id);
pipeline_event_destroy(data);
char** cursor = envp;
while(*cursor != NULL) {
free(*cursor);
cursor++;
}
free(envp);
}

src/log.c

@@ -78,6 +78,7 @@ void log_log(const char* file, int line, int level, const char* fmt, ...) {
 g_log_tm = localtime(&t);
 }
 strftime(timestamp, sizeof(timestamp), "%H:%M:%S", g_log_tm);
+g_log_tm = NULL;
 const char* level_color = get_level_color(level);
 const char* level_name = get_level_name(level);

src/main.c

@@ -15,104 +15,41 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
+#include "api.h"
 #include "cli.h"
+#include "executor.h"
 #include "log.h"
 #include "notify.h"
 #include "pipeline.h"
 #include "threadpool.h"
 #include "util.h"
-#include <fcntl.h>
-#include <spawn.h>
+#include <mqueue.h>
+#include <signal.h>
+#include <stdlib.h>
 #include <stdlib.h>
 #include <sys/stat.h>
-#include <uuid/uuid.h>
-#include <wait.h>
+#include <time.h>
-threadpool* pool = NULL;
-char* log_dir = "./"; // NOTE: must end with a /
+threadpool* worker_pool = NULL;
+char* trigger_dir = "/tmp/sci";
+bool config_file_changed = false;
-char* create_pipeline_id() {
-uuid_t uuid;
-uuid_generate(uuid);
-char* pipeline_id = malloc(32);
-uuid_unparse_lower(uuid, pipeline_id);
-return pipeline_id;
-}
-optional_int open_logfile(const char* const pipeline_id) {
-optional_int result;
-result.has_value = false;
-result.value = 0;
-char* log_file = join(pipeline_id, ".log");
-char* log_filepath = join(log_dir, log_file);
-int fd = open(log_filepath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
-if (fd != -1) {
-result.has_value = true;
-result.value = fd;
-} else
-perror("open");
-free(log_file);
-free(log_filepath);
-return result;
-}
-void executor(void* data) {
-// Create pipeline id
-char* pipeline_id = create_pipeline_id();
-// Create logfile path
-optional_int fd = open_logfile(pipeline_id);
-if(!fd.has_value)
-return;
-// spawn the process
-pid_t pid;
-posix_spawn_file_actions_t actions;
-posix_spawn_file_actions_init(&actions);
-posix_spawn_file_actions_adddup2(&actions, fd.value, STDOUT_FILENO);
-posix_spawn_file_actions_adddup2(&actions, fd.value, STDERR_FILENO);
-const pipeline_event* const e = data;
-char* name = join("SCI_PIPELINE_NAME=", e->name);
-char* url = join("SCI_PIPELINE_URL=", e->url);
-char* trigger = join("SCI_PIPELINE_TRIGGER=", e->trigger);
-char* id = join("SCI_PIPELINE_ID=", pipeline_id);
-char* envp[] = { name, url, trigger, id, NULL };
-char* argv[] = { "/bin/sh", "-c", e->command, NULL };
-if(posix_spawn(&pid, "/bin/sh", &actions, NULL, argv, envp) != 0) {
-perror("posix_spawn");
-goto end; // I know. The raptors have picked up the scent. I'll just have to mask it with more poopy code.
-}
-log_trace("{%s} (%s) spawned", pipeline_id, e->name);
-// Wait for process to complete
-int status;
-waitpid(pid, &status, 0);
-if(WIFEXITED(status))
-log_trace("{%s} (%s) exited with status %d", pipeline_id, e->name, WEXITSTATUS(status));
-end:
-close(fd.value);
-free(pipeline_id);
-free(name);
-free(url);
-free(trigger);
-free(id);
-}
 void on_event(pipeline_event* const e) {
-if(!threadpool_add_work(pool, executor, (void*)e))
+if(!threadpool_add_work(worker_pool, executor, (void*)e))
 log_error("could not add work to the threadpool");
 }
-void* listen_for_changes_thread(void* data) {
-const pipeline_conf* conf = (const pipeline_conf*)data;
-while(1) // TODO: Should be while(sigint_has_not_been_caught) instead
-listen_for_changes(conf, &on_event);
+void listener_thread_cleanup(void* data) {
 // We're now done with the config.
-free(conf->name);
-free(conf->url);
-free(conf->trigger);
-free(conf->command);
-free(data);
+pipeline_destroy((pipeline_conf*)data);
+}
+void* listen_for_changes_thread(void* data) {
+pipeline_conf* conf = (pipeline_conf*)data;
+pthread_cleanup_push(listener_thread_cleanup, conf);
+while(1)
+listen_for_changes(conf, &on_event);
+pthread_cleanup_pop(1);
 return NULL;
 }
@@ -134,18 +71,31 @@ void config_interpret_line(const char* line) {
 log_error("unable to register pipeline");
 return;
 }
-char* dest;
-// NOTE: trigger names are allowed max 32 characters
-dest = malloc(sizeof(*dest) * (9+33));
-dest[0] = '\0';
-strncat(dest, "/tmp/sci/", 10);
-strncat(dest, conf.value->trigger, 33);
+char* new_trigger_val = join3(trigger_dir, "/", conf.value->trigger);
 free(conf.value->trigger);
-conf.value->trigger = dest;
+conf.value->trigger = new_trigger_val;
 pthread_t t = spawn_listener(conf.value);
 pipeline_register(t);
 }
+void on_config_file_changed() {
+config_file_changed = true;
+pipeline_cancel();
+log_info("config file changed, reloading...");
+}
+void* listen_for_config_changes_thread(void* data) {
+while(1)
+listen_for_config_changes((const char*)data, &on_config_file_changed);
+return NULL;
+}
+void signal_handler(int signal) {
+log_info("signal retrieved");
+if(signal == SIGINT)
+pipeline_cancel();
+}
 int main(int argc, char** argv) {
 cli_options args = parse(argc, argv);
 log_settings settings;
@@ -154,6 +104,8 @@ int main(int argc, char** argv) {
 settings.out_file = args.log_file.has_value ? fopen(args.log_file.value, "w+") : stdout;
 log_init(settings);
+signal(SIGINT, signal_handler);
 if(args.help) {
 print_help(stdout, argv[0]);
 exit(EXIT_SUCCESS);
@@ -169,24 +121,39 @@
 exit(EXIT_FAILURE);
 }
-if(args.pipeline_log_dir.has_value)
-log_dir = args.pipeline_log_dir.value;
-struct stat st = {0};
-if(stat(log_dir, &st) == -1)
-mkdir(log_dir, 0700);
-if(stat("/tmp/sci", &st) == -1)
-mkdir("/tmp/sci", 0700);
 if(access(args.config_file.value, F_OK) != 0) {
 fprintf(stderr, "no such file or directory %s\n", args.config_file.value);
 exit(EXIT_FAILURE);
 }
-pool = threadpool_create(args.executors);
-per_line(args.config_file.value, &config_interpret_line);
-pipeline_loop();
-threadpool_destroy(pool);
+if(args.pipeline_log_dir.has_value)
+set_logdir(args.pipeline_log_dir.value);
+if(args.pipeline_cwd.has_value)
+set_working_directory(args.pipeline_cwd.value);
+struct stat st = {0};
+if(stat(trigger_dir, &st) == -1)
+mkdir(trigger_dir, 0700);
+if(args.environment_vars.has_value)
+set_shared_environment(args.environment_vars.value);
+api_start_p();
+log_info("spawning trigger thread for config file");
+pthread_t conf_listener;
+ASSERT_SYSCALL_SUCCESS(pthread_create(&conf_listener, NULL, &listen_for_config_changes_thread, (void*)args.config_file.value));
+pthread_setname_np(conf_listener, "sci-conf-listener");
+do {
+config_file_changed = false;
+worker_pool = threadpool_create(args.executors);
+per_line(args.config_file.value, &config_interpret_line);
+log_info("listening for pipeline invocations");
+pipeline_loop();
+} while(config_file_changed);
+pthread_cancel(conf_listener);
+threadpool_destroy(worker_pool);
+destroy_options(args);
+api_destroy();
 }

src/notify.c

@@ -18,12 +18,13 @@
 #include "notify.h"
 #include "util.h"
 #include "log.h"
+#include <stdlib.h>
 #define EV_SIZE sizeof(struct inotify_event)
 #define BUF_LEN EV_SIZE * 32
 void listen_for_changes(const pipeline_conf* config, notify_callback callback) {
-// TODO: callback is a bit slow sometimes. We should also poll once after calling callback
+// TODO: callback is potentially slow. We should also poll once after calling callback
 const char* filename = config->trigger;
 if(access(filename, F_OK) != 0) {
 log_trace("file does not exist yet, creating it.");
@@ -36,17 +37,43 @@ void listen_for_changes(const pipeline_conf* config, notify_callback callback) {
 log_trace("listening for changes in file: %s", filename);
 char buffer[BUF_LEN];
 int r = read(fd, buffer, BUF_LEN);
-assert(r != -1);
+if(r == -1) {
+perror("read");
+return;
+}
 for(int i = 0; i < r; ) {
 struct inotify_event* e = (struct inotify_event*)&buffer[i];
-pipeline_event ev;
-ev.event = e;
-ev.name = config->name;
-ev.url = config->url;
-ev.trigger = config->trigger;
-ev.command = config->command;
-callback(&ev);
+pipeline_event* ev = malloc(sizeof(pipeline_event));
+ev->event = e;
+ev->name = strdup(config->name);
+ev->url = strdup(config->url);
+ev->trigger = strdup(config->trigger);
+ev->command = strdup(config->command);
+callback(ev);
 i += EV_SIZE + e->len;
 }
 ASSERT_SYSCALL_SUCCESS(close(fd));
 }
+void listen_for_config_changes(const char* config_filepath, config_change_callback callback) {
+if(access(config_filepath, F_OK) != 0) {
+perror("access");
+return;
+}
+int fd = inotify_init();
+ASSERT_SYSCALL_SUCCESS(fd);
+inotify_add_watch(fd, config_filepath, IN_ATTRIB);
+log_trace("listening for changes in file: %s", config_filepath);
+char buffer[BUF_LEN];
+int r = read(fd, buffer, BUF_LEN);
+if(r == -1) {
+perror("read");
+return;
+}
+assert(r != -1);
+for(int i = 0; i < r; ) {
+callback();
+i += EV_SIZE + ((struct inotify_event*)&buffer[i])->len;;
+}
+ASSERT_SYSCALL_SUCCESS(close(fd));
+}

src/pipeline.c

@@ -19,12 +19,14 @@
 #include "pipeline.h"
 #include "threadlist.h"
 #include "util.h"
+#include <pthread.h>
 #include <regex.h>
 #include <stdlib.h>
 pthread_list_node* root = NULL;
 optional_pipeline_conf pipeline_create(const char* config_line) {
+log_trace("pipeline create");
 optional_pipeline_conf result;
 result.has_value = false;
 const char* pattern = "[^[:blank:]]+|\"[^\"]*\"";
@@ -40,15 +42,21 @@ optional_pipeline_conf pipeline_create(const char* config_line) {
 break;
 off = pmatch[0].rm_so + (cursor - config_line);
 len = pmatch[0].rm_eo - pmatch[0].rm_so;
-opts[i] = strndup(config_line + off, len);
+// Cut off the "-s if it is string-enclosed
+if(config_line[off] == '"' && config_line[off+len-1] == '"')
+opts[i] = strndup(config_line + off+1, len-2);
+else
+opts[i] = strndup(config_line + off, len);
 cursor += pmatch[0].rm_eo;
 }
 if(i != 4) {
-log_error("invalid configuration!\nline is invalid: \"%s\"");
+log_error("invalid configuration!");
+log_error("line is invalid: \"%s\"", config_line);
 for(int j = i-1; j >= 0; j--)
 free(opts[j]);
 return result;
 }
+regfree(&reg);
 result.value = malloc(sizeof(pipeline_conf));
 result.value->name = opts[0];
@@ -64,7 +72,17 @@ optional_pipeline_conf pipeline_create(const char* config_line) {
 return result;
 }
+void pipeline_destroy(pipeline_conf* conf) {
+log_trace("pipeline destroy");
+free(conf->name);
+free(conf->url);
+free(conf->trigger);
+free(conf->command);
+free(conf);
+}
 void pipeline_register(pthread_t thread) {
+log_trace("pipeline register thread");
 if(root == NULL) {
 root = create_thread_node(thread);
 return;
@@ -73,6 +91,35 @@ void pipeline_register(pthread_t thread) {
 }
 void pipeline_loop() {
+log_trace("pipeline loop");
 clear_thread_list(root);
 root = NULL;
 }
+void pipeline_cancel() {
+log_trace("cancelling pipeline");
+pthread_list_node* cursor = root;
+while(cursor != NULL) {
+pthread_cancel(cursor->thread);
+cursor = cursor->next;
+}
+}
+void pipeline_event_destroy(pipeline_event* ev) {
+log_trace("pipeline event destroy");
+free(ev->name);
+free(ev->trigger);
+free(ev->url);
+free(ev->command);
+free(ev);
+}
+int pipeline_count() {
+int result = 0;
+pthread_list_node* cursor = root;
+while(cursor != NULL) {
+cursor = cursor->next;
+result++;
+}
+return result;
+}

sci(1) man page

@@ -42,14 +42,12 @@ The operation of
 is configured through a
 .I pipelines.conf
 configuration file (see
-.I sci(7)
+.BR sci (5)
 for configuration language details)
 and each pipeline will have an associated pipeline trigger file that can be
 By default, pipeline triggers are placed in /tmp/sci but this can be overridden with the
-.OP -x.
 .SH EXAMPLES
 A simple example configuration file could look something like the following:
@@ -71,7 +69,6 @@ disable all warnings.
 .SH AUTHOR
 Asger Gitz\-Johansen <asger.gitz@hotmail.com>.
-\" TODO: decide on license
 .SH COPYRIGHT
 Copyright (C) 2024 Asger Gitz-Johansen
@@ -88,5 +85,5 @@ GNU General Public License for more details.
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
-.SH SEE ALSO
-\" TODO: write sci(7)
+.SH "SEE ALSO"
+.BR sci (5),

src/strlist.c Normal file

@@ -0,0 +1,91 @@
#include "strlist.h"
#include <stdlib.h>
#include <string.h>
#define MAX_STRLEN 512
strlist_node* create_strlist_node(char* str) {
strlist_node* new = malloc(sizeof(strlist_node));
new->previous = NULL;
new->next = NULL;
if(str)
new->str = strndup(str, MAX_STRLEN);
else
new->str = NULL;
return new;
}
strlist_node* add_str(char* str, strlist_node* root) {
strlist_node* cursor = root;
while(cursor->next != NULL)
cursor = cursor->next;
strlist_node* new = malloc(sizeof(strlist_node));
new->previous = cursor;
new->next = NULL;
new->str = strndup(str, MAX_STRLEN);
cursor->next = new;
return new;
}
strlist_node* add_str_node(strlist_node* root, strlist_node* node) {
strlist_node* cursor = root;
while(cursor->next != NULL)
cursor = cursor->next;
node->previous = cursor;
node->next = NULL;
cursor->next = node;
return node;
}
void remove_strlist_node(strlist_node* node) {
strlist_node* prev = node->previous;
strlist_node* next = node->next;
if(node->str)
free(node->str);
node->str = NULL;
free(node);
if(prev != NULL)
prev->next = next;
if(next != NULL)
next->previous = prev;
}
void clear_strlist(strlist_node* root) {
strlist_node* cursor = root;
while(cursor != NULL) {
if(cursor->str != NULL)
free(cursor->str);
strlist_node* prev = cursor;
cursor = cursor->next;
free(prev);
}
}
size_t strlist_length(strlist_node* root) {
size_t result = 0;
strlist_node* cursor = root;
while(cursor != NULL) {
result++;
cursor = cursor->next;
}
return result;
}
char** strlist_to_array(strlist_node* root) {
size_t len = strlist_length(root);
if(len <= 0)
return NULL;
char** result = malloc(sizeof(char*) * (len + 1));
for(int i = 0; i < len+1; i++)
result[i] = NULL;
strlist_node* cursor = root;
for(int i = 0; i < len; i++) {
if(cursor == NULL)
break;
if(cursor->str == NULL)
continue;
result[i] = strdup(cursor->str);
cursor = cursor->next;
}
return result;
}

src/threadlist.c

@@ -71,7 +71,8 @@ void clear_thread_list(pthread_list_node* root) {
 pthread_list_node* cursor = root;
 while(cursor != NULL) {
 pthread_join(cursor->thread, NULL);
+pthread_list_node* prev = cursor;
 cursor = cursor->next;
-free(cursor->previous);
+free(prev);
 }
 }

src/util.c

@@ -15,49 +15,180 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
+#include "util.h"
 #include "log.h"
-#include "util.h"
 #include <ctype.h>
+#include <linux/limits.h>
 #include <stdlib.h>
 char* trim(const char* const str) {
 char* begin = strdup(str);
 char* end;
 while(isspace((unsigned char)*begin))
 begin++;
 if(*begin == 0)
 return begin;
 end = begin + strlen(begin) - 1;
 while(end > begin && isspace((unsigned char)*end))
 end--;
 *(end + 1) = '\0';
 return begin;
 }
 void per_line(const char* file, line_handler handler) {
 FILE* stream;
 char* line = NULL;
 size_t len = 0;
 ssize_t nread;
 log_trace("reading file %s", file);
 stream = fopen(file, "r");
 if(stream == NULL) {
 perror("fopen");
 return;
 }
 while((nread = getline(&line, &len, stream)) != -1) {
 char* line_trimmed = trim(line);
 handler(line_trimmed);
 free(line_trimmed);
 }
 free(line);
 fclose(stream);
 }
 char* join(const char* a, const char* b) {
 size_t alen = strlen(a);
 size_t blen = strlen(b);
 char* result = malloc(alen + blen + 1);
 sprintf(result, "%s%s", a, b);
 return result;
+}
+char* join3(const char* a, const char* b, const char* c) {
+size_t alen = strlen(a);
+size_t blen = strlen(b);
+size_t clen = strlen(c);
+char* result = malloc(alen + blen + clen + 1);
+sprintf(result, "%s%s%s", a, b, c);
+return result;
+}
+char* join4(const char* a, const char* b, const char* c, const char* d) {
+size_t alen = strlen(a);
+size_t blen = strlen(b);
+size_t clen = strlen(c);
+size_t dlen = strlen(d);
+char* result = malloc(alen + blen + clen + dlen + 1);
+sprintf(result, "%s%s%s%s", a, b, c, d);
+return result;
+}
+char* join5(const char* a, const char* b, const char* c, const char* d, const char* e) {
+size_t alen = strlen(a);
+size_t blen = strlen(b);
+size_t clen = strlen(c);
+size_t dlen = strlen(d);
+size_t elen = strlen(e);
+char* result = malloc(alen + blen + clen + dlen + elen + 1);
+sprintf(result, "%s%s%s%s%s", a, b, c, d, e);
+return result;
+}
+char* join6(const char* a, const char* b, const char* c, const char* d, const char* e, const char* f) {
+size_t alen = strlen(a);
+size_t blen = strlen(b);
+size_t clen = strlen(c);
+size_t dlen = strlen(d);
+size_t elen = strlen(e);
+size_t flen = strlen(f);
+char* result = malloc(alen + blen + clen + dlen + elen + flen + 1);
+sprintf(result, "%s%s%s%s%s%s", a, b, c, d, e, f);
+return result;
+}
+const char* skip_arg(const char* cp) {
+while(*cp && !isspace(*cp))
+cp++;
+return cp;
+}
+char* skip_spaces(const char* str) {
+while(isspace(*str))
+str++;
+return(char*)str;
+}
+int count_argc(const char* str) {
+int count = 0;
+while(*str) {
+str = skip_spaces(str);
+if(!*str)
+continue;
+count++;
+str = skip_arg(str);
+}
+return count;
+}
+void argv_free(char** argv) {
+for(char** p = argv; *p; p++) {
+free(*p);
+*p = NULL;
+}
+free(argv);
+}
+char** argv_split(const char* str, int* argc_out) {
+int argc = count_argc(str);
+char** result = calloc(argc+1, sizeof(*result));
+if(result == NULL)
+return result;
+if(argc_out)
+*argc_out = argc+1;
+char** argvp = result;
+while(*str) {
+str = skip_spaces(str);
+if(!*str)
+continue;
+const char* p = str;
+str = skip_arg(str);
+char* t = strndup(p, str-p);
+if(t == NULL) {
+perror("strndup");
+argv_free(result);
+return NULL;
+}
+*argvp++ = t;
+}
+*argvp = NULL;
+return result;
+}
+int which(const char* program_name, char* out_full_program, int max_path) {
+assert(out_full_program);
+assert(max_path > 0);
+// sanity check - maybe program_name is actually a full-path to begin with
+if(access(program_name, X_OK) == 0) {
+snprintf(out_full_program, max_path, "%s", program_name);
+return 0;
+}
+char* path = getenv("PATH");
+if (path == NULL) {
+log_error("PATH environment variable not found.");
+return -1;
+}
+char* path_cpy = strdup(path);
+char* dir = strtok(path_cpy, ":");
+char full_path[PATH_MAX];
+while(dir != NULL) {
+snprintf(full_path, sizeof(full_path), "%s/%s", dir, program_name);
+if(access(full_path, X_OK) == 0) {
+snprintf(out_full_program, max_path, "%s", full_path);
+free(path_cpy);
+return 0;
+}
+dir = strtok(NULL, ":");
+}
+log_error("'%s' not found in PATH", program_name);
+free(path_cpy);
+return -1;
 }