
Eliseo Ramos Feliciano
Supervisory Patent Examiner (ID: 8243, Phone: (571) 272-7925, Office: P/2863)
| Field | Value |
|---|---|
| Most Active Art Unit | 2617 |
| Art Unit(s) | 2857, 2617, 2681, 2895, 2745, 2817, 2687, 2682 |
| Total Applications | 285 |
| Issued Applications | 150 |
| Pending Applications | 66 |
| Abandoned Applications | 73 |
Applications
| Application number | Title of the application | Filing Date | Status |
|---|---|---|---|
| 18/302874 | Atomic instruction set and architecture with bus arbitration locking | Apr 19, 2023 | Issued |
| 18/301386 | Scalable sparse matrix multiply acceleration using systolic arrays with feedback inputs | Apr 17, 2023 | Issued |
| 18/193635 | Technique for hardware activation function computation in RNS artificial neural networks | Mar 31, 2023 | Issued |
| 18/127875 | Deep neural network accelerator for optimized data processing, and control method of the deep neural network accelerator | Mar 29, 2023 | Issued |
| 18/125020 | Parallel decision system and method for distributed data processing | Mar 22, 2023 | Issued |
| 18/185880 | Network computer with two embedded rings | Mar 17, 2023 | Issued |
| 18/185416 | System and method for synchronizing processing between a plurality of processors | Mar 17, 2023 | Issued |
| 18/185236 | Throughput increase for tensor operations | Mar 16, 2023 | Issued |
| 18/176034 | Machine code instruction | Feb 28, 2023 | Issued |
| 18/111661 | Systems and methods for performing neural network operations | Feb 20, 2023 | Abandoned |
| 18/170696 | Native support for execution of get exponent, get mantissa, and scale instructions within a graphics processing unit via reuse of fused multiply-add execution unit hardware logic | Feb 17, 2023 | Issued |
| 18/104749 | Neural network architecture using convolution engines | Feb 1, 2023 | Issued |
| 18/098068 | Pre-staged instruction registers for variable length instruction set machine | Jan 17, 2023 | Issued |
| 18/089157 | Compiler-based input synchronization for processor with variant stage latencies | Dec 27, 2022 | Issued |
| 18/067790 | Hierarchical ring-based interconnection network for symmetric multiprocessors | Dec 19, 2022 | Issued |
| 18/067538 | Graph instruction processing method and apparatus | Dec 16, 2022 | Issued |
| 18/077362 | Reconfigurable computing chip | Dec 8, 2022 | Issued |
| 18/060615 | System and method of workload management for distributing workload over SIMD based parallel processing architecture | Dec 1, 2022 | Issued |
| 18/073313 | Method and apparatus for vector sorting | Dec 1, 2022 | Issued |
| 18/072081 | Application programming interface to wait on matrix multiply-accumulate | Nov 30, 2022 | Issued |