* [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873.
@ 2024-01-14 20:32 David Pflug
2024-01-17 17:29 ` Mathieu Othacehe
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: David Pflug @ 2024-01-14 20:32 UTC (permalink / raw)
To: 68455; +Cc: David Pflug
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.
Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
gnu/packages/machine-learning.scm | 133 ++++++++++++++++++------------
1 file changed, 78 insertions(+), 55 deletions(-)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 1616738399..0cdfe7bb08 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -22,6 +22,7 @@
;;; Copyright © 2023 Navid Afkhami <navid.afkhami@mdc-berlin.de>
;;; Copyright © 2023 Zheng Junjie <873216071@qq.com>
;;; Copyright © 2023 Troy Figiel <troy@troyfigiel.com>
+;;; Copyright © 2023 David Pflug <david@pflug.io>
;;;
;;; This file is part of GNU Guix.
;;;
@@ -517,63 +518,63 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
- (revision "0"))
- (package
- (name "llama-cpp")
- (version (git-version "0.0.0" revision commit))
- (source
- (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
- (commit (string-append "master-" (string-take commit 7)))))
- (file-name (git-file-name name version))
- (sha256
- (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
- (build-system cmake-build-system)
- (arguments
- (list
- #:modules '((ice-9 textual-ports)
- (guix build utils)
- ((guix build python-build-system) #:prefix python:)
- (guix build cmake-build-system))
- #:imported-modules `(,@%cmake-build-system-modules
- (guix build python-build-system))
- #:phases
- #~(modify-phases %standard-phases
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert-pth-to-ggml")
- (make-script "convert-lora-to-ggml")
- (make-script "convert"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
- (replace 'install
- (lambda _
- (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
- (inputs (list python))
- (propagated-inputs
- (list python-numpy python-pytorch python-sentencepiece))
- (home-page "https://github.com/ggerganov/llama.cpp")
- (synopsis "Port of Facebook's LLaMA model in C/C++")
- (description "This package provides a port to Facebook's LLaMA collection
+ (package
+ (name "llama-cpp")
+ (version "1873")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/ggerganov/llama.cpp")
+ (commit (string-append "b" version))))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+ (build-system cmake-build-system)
+ (arguments
+ (list
+ #:modules '((ice-9 textual-ports)
+ (guix build utils)
+ ((guix build python-build-system) #:prefix python:)
+ (guix build cmake-build-system))
+ #:imported-modules `(,@%cmake-build-system-modules
+ (guix build python-build-system))
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-before 'install 'install-python-scripts
+ (lambda _
+ (let ((bin (string-append #$output "/bin/")))
+ (define (make-script script)
+ (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+ (call-with-input-file
+ (string-append "../source/" script suffix)
+ (lambda (input)
+ (call-with-output-file (string-append bin script)
+ (lambda (output)
+ (format output "#!~a/bin/python3\n~a"
+ #$(this-package-input "python")
+ (get-string-all input))))))
+ (chmod (string-append bin script) #o555)))
+ (mkdir-p bin)
+ (make-script "convert-hf-to-gguf")
+ (make-script "convert-llama-ggml-to-gguf")
+ (make-script "convert-lora-to-ggml")
+ (make-script "convert-persimmon-to-gguf")
+ (make-script "convert"))))
+ (add-after 'install-python-scripts 'wrap-python-scripts
+ (assoc-ref python:%standard-phases 'wrap))
+ (replace 'install
+ (lambda _
+ (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+ (inputs (list python))
+ (propagated-inputs
+ (list python-numpy python-pytorch python-sentencepiece python-gguf))
+ (home-page "https://github.com/ggerganov/llama.cpp")
+ (synopsis "Port of Facebook's LLaMA model in C/C++")
+ (description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires models parameters to be downloaded
independently to be able to run a LLaMA model.")
- (license license:expat))))
+ (license license:expat)))
(define-public mcl
(package
@@ -5257,3 +5258,25 @@ (define-public oneapi-dnnl
"OneAPI Deep Neural Network Library (oneDNN) is a cross-platform
performance library of basic building blocks for deep learning applications.")
(license license:asl2.0)))
+
+(define-public python-gguf
+ (package
+ (name "python-gguf")
+ (version "0.6.0")
+ (source
+ (origin
+ (method url-fetch)
+ (uri (pypi-uri "gguf" version))
+ (sha256
+ (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
+ (build-system pyproject-build-system)
+ (arguments
+ `(#:phases
+ (modify-phases %standard-phases
+ (delete 'check))))
+ (inputs (list poetry python-pytest))
+ (propagated-inputs (list python-numpy))
+ (home-page "https://ggml.ai")
+ (synopsis "Read and write ML models in GGUF for GGML")
+ (description "Read and write ML models in GGUF for GGML")
+ (license license:expat)))
base-commit: 18393fcdddf5c3d834fa89ebf5f3925fc5b166ed
--
2.41.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873.
2024-01-14 20:32 [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 David Pflug
@ 2024-01-17 17:29 ` Mathieu Othacehe
2024-01-26 12:20 ` [bug#68455] [PATCH v2] " David Pflug
2024-11-23 1:26 ` [bug#68455] [PATCH] " David Pflug
2 siblings, 0 replies; 4+ messages in thread
From: Mathieu Othacehe @ 2024-01-17 17:29 UTC (permalink / raw)
To: David Pflug; +Cc: 68455
Hello David,
> +(define-public python-gguf
> + (package
> + (name "python-gguf")
> + (version "0.6.0")
> + (source
> + (origin
> + (method url-fetch)
> + (uri (pypi-uri "gguf" version))
> + (sha256
> + (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
> + (build-system pyproject-build-system)
> + (arguments
> + `(#:phases
> + (modify-phases %standard-phases
> + (delete 'check))))
> + (inputs (list poetry python-pytest))
> + (propagated-inputs (list python-numpy))
> + (home-page "https://ggml.ai")
> + (synopsis "Read and write ML models in GGUF for GGML")
> + (description "Read and write ML models in GGUF for GGML")
> + (license license:expat)))
This should be part of a separate patch. Can you send a v2?
Thanks,
Mathieu
^ permalink raw reply [flat|nested] 4+ messages in thread
* [bug#68455] [PATCH v2] gnu: llama-cpp: Update to 1873.
2024-01-14 20:32 [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 David Pflug
2024-01-17 17:29 ` Mathieu Othacehe
@ 2024-01-26 12:20 ` David Pflug
2024-11-23 1:26 ` [bug#68455] [PATCH] " David Pflug
2 siblings, 0 replies; 4+ messages in thread
From: David Pflug @ 2024-01-26 12:20 UTC (permalink / raw)
To: 68455; +Cc: David Pflug
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.
python-gguf was added separately in #68735.
Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
gnu/packages/machine-learning.scm | 110 +++++++++++++++---------------
1 file changed, 55 insertions(+), 55 deletions(-)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 0e88f7265b..1d590d1c1b 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -519,63 +519,63 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
- (revision "0"))
- (package
- (name "llama-cpp")
- (version (git-version "0.0.0" revision commit))
- (source
- (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
- (commit (string-append "master-" (string-take commit 7)))))
- (file-name (git-file-name name version))
- (sha256
- (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
- (build-system cmake-build-system)
- (arguments
- (list
- #:modules '((ice-9 textual-ports)
- (guix build utils)
- ((guix build python-build-system) #:prefix python:)
- (guix build cmake-build-system))
- #:imported-modules `(,@%cmake-build-system-modules
- (guix build python-build-system))
- #:phases
- #~(modify-phases %standard-phases
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert-pth-to-ggml")
- (make-script "convert-lora-to-ggml")
- (make-script "convert"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
- (replace 'install
- (lambda _
- (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
- (inputs (list python))
- (propagated-inputs
- (list python-numpy python-pytorch python-sentencepiece))
- (home-page "https://github.com/ggerganov/llama.cpp")
- (synopsis "Port of Facebook's LLaMA model in C/C++")
- (description "This package provides a port to Facebook's LLaMA collection
+ (package
+ (name "llama-cpp")
+ (version "1873")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/ggerganov/llama.cpp")
+ (commit (string-append "b" version))))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+ (build-system cmake-build-system)
+ (arguments
+ (list
+ #:modules '((ice-9 textual-ports)
+ (guix build utils)
+ ((guix build python-build-system) #:prefix python:)
+ (guix build cmake-build-system))
+ #:imported-modules `(,@%cmake-build-system-modules
+ (guix build python-build-system))
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-before 'install 'install-python-scripts
+ (lambda _
+ (let ((bin (string-append #$output "/bin/")))
+ (define (make-script script)
+ (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+ (call-with-input-file
+ (string-append "../source/" script suffix)
+ (lambda (input)
+ (call-with-output-file (string-append bin script)
+ (lambda (output)
+ (format output "#!~a/bin/python3\n~a"
+ #$(this-package-input "python")
+ (get-string-all input))))))
+ (chmod (string-append bin script) #o555)))
+ (mkdir-p bin)
+ (make-script "convert-hf-to-gguf")
+ (make-script "convert-llama-ggml-to-gguf")
+ (make-script "convert-lora-to-ggml")
+ (make-script "convert-persimmon-to-gguf")
+ (make-script "convert"))))
+ (add-after 'install-python-scripts 'wrap-python-scripts
+ (assoc-ref python:%standard-phases 'wrap))
+ (replace 'install
+ (lambda _
+ (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+ (inputs (list python))
+ (propagated-inputs
+ (list python-numpy python-pytorch python-sentencepiece python-gguf))
+ (home-page "https://github.com/ggerganov/llama.cpp")
+ (synopsis "Port of Facebook's LLaMA model in C/C++")
+ (description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires models parameters to be downloaded
independently to be able to run a LLaMA model.")
- (license license:expat))))
+ (license license:expat)))
(define-public mcl
(package
base-commit: c5453fbfeb0dbd19cb402199fe1e5ad51a051e56
--
2.41.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873.
2024-01-14 20:32 [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 David Pflug
2024-01-17 17:29 ` Mathieu Othacehe
2024-01-26 12:20 ` [bug#68455] [PATCH v2] " David Pflug
@ 2024-11-23 1:26 ` David Pflug
2 siblings, 0 replies; 4+ messages in thread
From: David Pflug @ 2024-11-23 1:26 UTC (permalink / raw)
To: 68455
This can be closed. The package has moved on well beyond this commit.
See #70883.
Thanks,
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2024-11-23 1:28 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-01-14 20:32 [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 David Pflug
2024-01-17 17:29 ` Mathieu Othacehe
2024-01-26 12:20 ` [bug#68455] [PATCH v2] " David Pflug
2024-11-23 1:26 ` [bug#68455] [PATCH] " David Pflug
Code repositories for project(s) associated with this external index
https://git.savannah.gnu.org/cgit/guix.git
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.